// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/scheduler/base/task_queue_impl.h"

#include "base/trace_event/blame_context.h"
#include "components/scheduler/base/task_queue_manager.h"
#include "components/scheduler/base/task_queue_manager_delegate.h"
#include "components/scheduler/base/time_domain.h"
#include "components/scheduler/base/work_queue.h"

namespace scheduler {
namespace internal {

TaskQueueImpl::TaskQueueImpl(
    TaskQueueManager* task_queue_manager,
    TimeDomain* time_domain,
    const Spec& spec,
    const char* disabled_by_default_tracing_category,
    const char* disabled_by_default_verbose_tracing_category)
    : thread_id_(base::PlatformThread::CurrentId()),
      any_thread_(task_queue_manager, spec.pump_policy, time_domain),
      name_(spec.name),
      disabled_by_default_tracing_category_(
          disabled_by_default_tracing_category),
      disabled_by_default_verbose_tracing_category_(
          disabled_by_default_verbose_tracing_category),
      main_thread_only_(task_queue_manager,
                        spec.pump_policy,
                        this,
                        time_domain),
      wakeup_policy_(spec.wakeup_policy),
      should_monitor_quiescence_(spec.should_monitor_quiescence),
      should_notify_observers_(spec.should_notify_observers),
      should_report_when_execution_blocked_(
          spec.should_report_when_execution_blocked) {
  DCHECK(time_domain);
  time_domain->RegisterQueue(this);
}

TaskQueueImpl::~TaskQueueImpl() {
#if DCHECK_IS_ON()
  base::AutoLock lock(any_thread_lock_);
  // NOTE this check shouldn't fire because |TaskQueueManager::queues_|
  // contains a strong reference to this TaskQueueImpl and the TaskQueueManager
  // destructor calls UnregisterTaskQueue on all task queues.
  DCHECK(any_thread().task_queue_manager == nullptr)
      << "UnregisterTaskQueue must be called first!";
#endif
}

TaskQueueImpl::Task::Task()
    : PendingTask(tracked_objects::Location(),
                  base::Closure(),
                  base::TimeTicks(),
                  true),
#ifndef NDEBUG
      enqueue_order_set_(false),
#endif
      enqueue_order_(0) {
  sequence_num = 0;
}

TaskQueueImpl::Task::Task(const tracked_objects::Location& posted_from,
                          const base::Closure& task,
                          base::TimeTicks desired_run_time,
                          EnqueueOrder sequence_number,
                          bool nestable)
    : PendingTask(posted_from, task, desired_run_time, nestable),
#ifndef NDEBUG
      enqueue_order_set_(false),
#endif
      enqueue_order_(0) {
  sequence_num = sequence_number;
}

TaskQueueImpl::Task::Task(const tracked_objects::Location& posted_from,
                          const base::Closure& task,
                          base::TimeTicks desired_run_time,
                          EnqueueOrder sequence_number,
                          bool nestable,
                          EnqueueOrder enqueue_order)
    : PendingTask(posted_from, task, desired_run_time, nestable),
#ifndef NDEBUG
      enqueue_order_set_(true),
#endif
      enqueue_order_(enqueue_order) {
  sequence_num = sequence_number;
}

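// |any_thread_| groups the state that may be touched from any thread and is
// protected by |any_thread_lock_|; |main_thread_only_| groups the state that
// must only be touched from the thread this queue was created on (see
// |thread_id_| and |main_thread_checker_|). Fields such as
// |task_queue_manager|, |pump_policy| and |time_domain| are deliberately
// duplicated in both structs so that main-thread fast paths can read them
// without taking the lock.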
TaskQueueImpl::AnyThread::AnyThread(TaskQueueManager* task_queue_manager,
                                    PumpPolicy pump_policy,
                                    TimeDomain* time_domain)
    : task_queue_manager(task_queue_manager),
      pump_policy(pump_policy),
      time_domain(time_domain) {}

TaskQueueImpl::AnyThread::~AnyThread() {}

TaskQueueImpl::MainThreadOnly::MainThreadOnly(
    TaskQueueManager* task_queue_manager,
    PumpPolicy pump_policy,
    TaskQueueImpl* task_queue,
    TimeDomain* time_domain)
    : task_queue_manager(task_queue_manager),
      pump_policy(pump_policy),
      time_domain(time_domain),
      delayed_work_queue(new WorkQueue(task_queue, "delayed")),
      immediate_work_queue(new WorkQueue(task_queue, "immediate")),
      set_index(0),
      is_enabled(true),
      blame_context(nullptr) {}

TaskQueueImpl::MainThreadOnly::~MainThreadOnly() {}

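// Detaches the queue from its TaskQueueManager and TimeDomain. After this
// returns, posting fails (the manager pointers are null) and any tasks still
// sitting in the incoming and work queues are discarded.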
void TaskQueueImpl::UnregisterTaskQueue() {
  base::AutoLock lock(any_thread_lock_);
  if (main_thread_only().time_domain)
    main_thread_only().time_domain->UnregisterQueue(this);
  if (!any_thread().task_queue_manager)
    return;
  any_thread().time_domain = nullptr;
  main_thread_only().time_domain = nullptr;
  any_thread().task_queue_manager->UnregisterTaskQueue(this);

  any_thread().task_queue_manager = nullptr;
  main_thread_only().task_queue_manager = nullptr;
  main_thread_only().delayed_incoming_queue = std::priority_queue<Task>();
  any_thread().immediate_incoming_queue = std::queue<Task>();
  main_thread_only().immediate_work_queue.reset();
  main_thread_only().delayed_work_queue.reset();
}

bool TaskQueueImpl::RunsTasksOnCurrentThread() const {
  base::AutoLock lock(any_thread_lock_);
  return base::PlatformThread::CurrentId() == thread_id_;
}

bool TaskQueueImpl::PostDelayedTask(const tracked_objects::Location& from_here,
                                    const base::Closure& task,
                                    base::TimeDelta delay) {
  if (delay.is_zero())
    return PostImmediateTaskImpl(from_here, task, TaskType::NORMAL);

  return PostDelayedTaskImpl(from_here, task, delay, TaskType::NORMAL);
}

bool TaskQueueImpl::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    base::TimeDelta delay) {
  if (delay.is_zero())
    return PostImmediateTaskImpl(from_here, task, TaskType::NON_NESTABLE);

  return PostDelayedTaskImpl(from_here, task, delay, TaskType::NON_NESTABLE);
}

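// Immediate (zero-delay) posting path. Grabs |any_thread_lock_|, allocates a
// sequence number from the TaskQueueManager and appends the task to the
// immediate incoming queue. Returns false if the queue has already been
// unregistered.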
bool TaskQueueImpl::PostImmediateTaskImpl(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    TaskType task_type) {
  base::AutoLock lock(any_thread_lock_);
  if (!any_thread().task_queue_manager)
    return false;

  EnqueueOrder sequence_number =
      any_thread().task_queue_manager->GetNextSequenceNumber();

  PushOntoImmediateIncomingQueueLocked(
      Task(from_here, task, base::TimeTicks(), sequence_number,
           task_type != TaskType::NON_NESTABLE, sequence_number));
  return true;
}

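// Delayed posting has two paths. On the main thread we can talk to the
// TimeDomain directly and push onto |delayed_incoming_queue| without taking
// the lock. From any other thread we cannot touch main-thread-only state, so
// the task is wrapped in an immediate "thread hop" task which re-posts it on
// the main thread (see PushOntoDelayedIncomingQueueLocked below).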
bool TaskQueueImpl::PostDelayedTaskImpl(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    base::TimeDelta delay,
    TaskType task_type) {
  DCHECK_GT(delay, base::TimeDelta());
  if (base::PlatformThread::CurrentId() == thread_id_) {
    // Lock-free fast path for delayed tasks posted from the main thread.
    if (!main_thread_only().task_queue_manager)
      return false;

    EnqueueOrder sequence_number =
        main_thread_only().task_queue_manager->GetNextSequenceNumber();

    base::TimeTicks time_domain_now = main_thread_only().time_domain->Now();
    base::TimeTicks time_domain_delayed_run_time = time_domain_now + delay;
    PushOntoDelayedIncomingQueueFromMainThread(
        Task(from_here, task, time_domain_delayed_run_time, sequence_number,
             task_type != TaskType::NON_NESTABLE),
        time_domain_now);
  } else {
    // NOTE posting a delayed task from a different thread is not expected to
    // be common. This pathway is less optimal than it could be because it
    // causes two main-thread tasks to be run. Should this assumption prove
    // false in the future, we may need to revisit it.
    base::AutoLock lock(any_thread_lock_);
    if (!any_thread().task_queue_manager)
      return false;

    EnqueueOrder sequence_number =
        any_thread().task_queue_manager->GetNextSequenceNumber();

    base::TimeTicks time_domain_now = any_thread().time_domain->Now();
    base::TimeTicks time_domain_delayed_run_time = time_domain_now + delay;
    PushOntoDelayedIncomingQueueLocked(
        Task(from_here, task, time_domain_delayed_run_time, sequence_number,
             task_type != TaskType::NON_NESTABLE));
  }
  return true;
}

void TaskQueueImpl::PushOntoDelayedIncomingQueueFromMainThread(
    Task pending_task,
    base::TimeTicks now) {
  main_thread_only().task_queue_manager->DidQueueTask(pending_task);

  // Schedule a later call to MoveReadyDelayedTasksToDelayedWorkQueue.
  base::TimeTicks delayed_run_time = pending_task.delayed_run_time;
  main_thread_only().delayed_incoming_queue.push(std::move(pending_task));
  main_thread_only().time_domain->ScheduleDelayedWork(
      this, delayed_run_time, now);
  TraceQueueSize(false);
}

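// Cross-thread delayed posting. We cannot push onto the main-thread-only
// |delayed_incoming_queue| from here, so instead we enqueue an immediate task
// bound to ScheduleDelayedWorkTask, which performs the push and registers the
// wakeup with the TimeDomain once it runs on the main thread.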
void TaskQueueImpl::PushOntoDelayedIncomingQueueLocked(Task pending_task) {
  any_thread().task_queue_manager->DidQueueTask(pending_task);

  int thread_hop_task_sequence_number =
      any_thread().task_queue_manager->GetNextSequenceNumber();
  PushOntoImmediateIncomingQueueLocked(Task(
      FROM_HERE,
      base::Bind(&TaskQueueImpl::ScheduleDelayedWorkTask, this,
                 base::Passed(&pending_task)),
      base::TimeTicks(), thread_hop_task_sequence_number, false,
      thread_hop_task_sequence_number));
}

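// Appends |pending_task| to the immediate incoming queue. If the queue was
// previously empty we also (a) register with the TimeDomain so the task gets
// reloaded into the work queue on the next update and (b), under
// PumpPolicy::AUTO, ask the TaskQueueManager to schedule immediate work.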
void TaskQueueImpl::PushOntoImmediateIncomingQueueLocked(Task pending_task) {
  if (any_thread().immediate_incoming_queue.empty())
    any_thread().time_domain->RegisterAsUpdatableTaskQueue(this);
  if (any_thread().pump_policy == PumpPolicy::AUTO &&
      any_thread().immediate_incoming_queue.empty()) {
    any_thread().task_queue_manager->MaybeScheduleImmediateWork(FROM_HERE);
  }
  any_thread().task_queue_manager->DidQueueTask(pending_task);
  any_thread().immediate_incoming_queue.push(std::move(pending_task));
  TraceQueueSize(true);
}

void TaskQueueImpl::ScheduleDelayedWorkTask(Task pending_task) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  base::TimeTicks delayed_run_time = pending_task.delayed_run_time;
  main_thread_only().delayed_incoming_queue.push(std::move(pending_task));
  main_thread_only().time_domain->ScheduleDelayedWork(
      this, delayed_run_time,
      main_thread_only().time_domain->Now());
}

void TaskQueueImpl::SetQueueEnabled(bool enabled) {
  if (main_thread_only().is_enabled == enabled)
    return;
  main_thread_only().is_enabled = enabled;
  if (!main_thread_only().task_queue_manager)
    return;
  if (enabled) {
    main_thread_only().task_queue_manager->selector_.EnableQueue(this);
  } else {
    main_thread_only().task_queue_manager->selector_.DisableQueue(this);
  }
}

bool TaskQueueImpl::IsQueueEnabled() const {
  return main_thread_only().is_enabled;
}

bool TaskQueueImpl::IsEmpty() const {
  if (!main_thread_only().delayed_work_queue->Empty() ||
      !main_thread_only().immediate_work_queue->Empty()) {
    return false;
  }

  base::AutoLock lock(any_thread_lock_);
  return any_thread().immediate_incoming_queue.empty() &&
         main_thread_only().delayed_incoming_queue.empty();
}

bool TaskQueueImpl::HasPendingImmediateWork() const {
  if (!main_thread_only().delayed_work_queue->Empty() ||
      !main_thread_only().immediate_work_queue->Empty()) {
    return true;
  }

  return NeedsPumping();
}

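// Returns true if work needs to be moved from the incoming queues into the
// work queues, i.e. the immediate work queue is empty but there is either an
// immediate task waiting in the incoming queue or a delayed task whose run
// time has already passed.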
bool TaskQueueImpl::NeedsPumping() const {
  if (!main_thread_only().immediate_work_queue->Empty())
    return false;

  base::AutoLock lock(any_thread_lock_);
  if (!any_thread().immediate_incoming_queue.empty())
    return true;

  // If there's no immediate incoming work, then we only need pumping if there
  // is a delayed task that should be running now.
  if (main_thread_only().delayed_incoming_queue.empty())
    return false;

  return main_thread_only().delayed_incoming_queue.top().delayed_run_time <=
         main_thread_only().time_domain->CreateLazyNow().Now();
}

bool TaskQueueImpl::TaskIsOlderThanQueuedImmediateTasksLocked(
    const Task* task) {
  // A null task is passed when UpdateQueue is called before any task is run.
  // In this case we don't want to pump an after_wakeup queue, so return true
  // here.
  if (!task)
    return true;

  // Return false if task is newer than the oldest immediate task.
  if (!any_thread().immediate_incoming_queue.empty() &&
      task->enqueue_order() >
          any_thread().immediate_incoming_queue.front().enqueue_order()) {
    return false;
  }
  return true;
}

bool TaskQueueImpl::TaskIsOlderThanQueuedDelayedTasks(const Task* task) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  // A null task is passed when UpdateQueue is called before any task is run.
  // In this case we don't want to pump an after_wakeup queue, so return true
  // here.
  if (!task)
    return true;

  EnqueueOrder enqueue_order;
  if (!main_thread_only().delayed_work_queue->GetFrontTaskEnqueueOrder(
          &enqueue_order)) {
    return true;
  }

  return task->enqueue_order() < enqueue_order;
}

bool TaskQueueImpl::ShouldAutoPumpImmediateQueueLocked(
    bool should_trigger_wakeup,
    const Task* previous_task) {
  if (main_thread_only().pump_policy == PumpPolicy::MANUAL)
    return false;
  if (main_thread_only().pump_policy == PumpPolicy::AFTER_WAKEUP &&
      (!should_trigger_wakeup ||
       TaskIsOlderThanQueuedImmediateTasksLocked(previous_task)))
    return false;
  return true;
}

bool TaskQueueImpl::ShouldAutoPumpDelayedQueue(bool should_trigger_wakeup,
                                               const Task* previous_task) {
  if (main_thread_only().pump_policy == PumpPolicy::MANUAL)
    return false;
  if (main_thread_only().pump_policy == PumpPolicy::AFTER_WAKEUP &&
      (!should_trigger_wakeup ||
       TaskIsOlderThanQueuedDelayedTasks(previous_task)))
    return false;
  return true;
}

void TaskQueueImpl::MoveReadyDelayedTasksToDelayedWorkQueue(LazyNow* lazy_now) {
  // Enqueue all delayed tasks that should be running now.
  while (!main_thread_only().delayed_incoming_queue.empty() &&
         main_thread_only().delayed_incoming_queue.top().delayed_run_time <=
             lazy_now->Now()) {
    // Note: the const_cast is needed because there is no direct way to move
    // elements out of a priority queue. The queue must not be modified between
    // the top() and the pop().
    main_thread_only().delayed_work_queue->PushAndSetEnqueueOrder(
        std::move(const_cast<Task&>(
            main_thread_only().delayed_incoming_queue.top())),
        main_thread_only().task_queue_manager->GetNextSequenceNumber());
    main_thread_only().delayed_incoming_queue.pop();
  }
}

void TaskQueueImpl::UpdateDelayedWorkQueue(LazyNow* lazy_now,
                                           bool should_trigger_wakeup,
                                           const Task* previous_task) {
  if (!main_thread_only().task_queue_manager)
    return;
  if (!ShouldAutoPumpDelayedQueue(should_trigger_wakeup, previous_task))
    return;
  MoveReadyDelayedTasksToDelayedWorkQueue(lazy_now);
  TraceQueueSize(false);
}

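// Reloads the main-thread immediate work queue by swapping in the contents of
// the any-thread immediate incoming queue, provided the pump policy allows
// it. Once the incoming queue has been drained, the queue unregisters itself
// from the TimeDomain's updatable-queue set.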
void TaskQueueImpl::UpdateImmediateWorkQueue(bool should_trigger_wakeup,
                                             const Task* previous_task) {
  DCHECK(main_thread_only().immediate_work_queue->Empty());
  base::AutoLock lock(any_thread_lock_);
  if (!main_thread_only().task_queue_manager)
    return;
  if (!ShouldAutoPumpImmediateQueueLocked(should_trigger_wakeup, previous_task))
    return;

  main_thread_only().immediate_work_queue->SwapLocked(
      any_thread().immediate_incoming_queue);

  // |any_thread().immediate_incoming_queue| is now empty so
  // TimeDomain::UpdateQueues no longer needs to consider this queue for
  // reloading.
  main_thread_only().time_domain->UnregisterAsUpdatableTaskQueue(this);
}

void TaskQueueImpl::TraceQueueSize(bool is_locked) const {
  bool is_tracing;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(disabled_by_default_tracing_category_,
                                     &is_tracing);
  if (!is_tracing)
    return;

  // It's only safe to access the work queues from the main thread.
  // TODO(alexclarke): We should find another way of tracing this.
  if (base::PlatformThread::CurrentId() != thread_id_)
    return;

  if (!is_locked)
    any_thread_lock_.Acquire();
  else
    any_thread_lock_.AssertAcquired();
  TRACE_COUNTER1(disabled_by_default_tracing_category_, GetName(),
                 any_thread().immediate_incoming_queue.size() +
                     main_thread_only().immediate_work_queue->Size() +
                     main_thread_only().delayed_work_queue->Size() +
                     main_thread_only().delayed_incoming_queue.size());
  if (!is_locked)
    any_thread_lock_.Release();
}

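// Updates the pump policy on both the any-thread and main-thread copies.
// Switching to PumpPolicy::AUTO from a non-AUTO policy immediately pumps the
// queue so that tasks which accumulated while pumping was suppressed become
// runnable.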
void TaskQueueImpl::SetPumpPolicy(PumpPolicy pump_policy) {
  base::AutoLock lock(any_thread_lock_);
  if (pump_policy == PumpPolicy::AUTO &&
      any_thread().pump_policy != PumpPolicy::AUTO) {
    LazyNow lazy_now(main_thread_only().time_domain->CreateLazyNow());
    PumpQueueLocked(&lazy_now, true);
  }
  any_thread().pump_policy = pump_policy;
  main_thread_only().pump_policy = pump_policy;
}

TaskQueue::PumpPolicy TaskQueueImpl::GetPumpPolicy() const {
  return main_thread_only().pump_policy;
}

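// Pumping moves every task that is currently eligible to run into the work
// queues: ready delayed tasks go to the delayed work queue and all queued
// immediate tasks are transferred to the immediate work queue. If either work
// queue is non-empty afterwards and |may_post_dowork| is true, a DoWork is
// requested from the TaskQueueManager.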
void TaskQueueImpl::PumpQueueLocked(LazyNow* lazy_now, bool may_post_dowork) {
  TRACE_EVENT1(disabled_by_default_tracing_category_,
               "TaskQueueImpl::PumpQueueLocked", "queue", name_);
  TaskQueueManager* task_queue_manager = any_thread().task_queue_manager;
  if (!task_queue_manager)
    return;

  MoveReadyDelayedTasksToDelayedWorkQueue(lazy_now);

  while (!any_thread().immediate_incoming_queue.empty()) {
    main_thread_only().immediate_work_queue->Push(
        std::move(any_thread().immediate_incoming_queue.front()));
    any_thread().immediate_incoming_queue.pop();
  }

  // |immediate_incoming_queue| is now empty so TimeDomain::UpdateQueues no
  // longer needs to consider this queue for reloading.
  main_thread_only().time_domain->UnregisterAsUpdatableTaskQueue(this);

  if (main_thread_only().immediate_work_queue->Empty() &&
      main_thread_only().delayed_work_queue->Empty()) {
    return;
  }

  if (may_post_dowork)
    task_queue_manager->MaybeScheduleImmediateWork(FROM_HERE);
}

void TaskQueueImpl::PumpQueue(LazyNow* lazy_now, bool may_post_dowork) {
  base::AutoLock lock(any_thread_lock_);
  PumpQueueLocked(lazy_now, may_post_dowork);
}

const char* TaskQueueImpl::GetName() const {
  return name_;
}

void TaskQueueImpl::SetQueuePriority(QueuePriority priority) {
  if (!main_thread_only().task_queue_manager || priority == GetQueuePriority())
    return;
  main_thread_only().task_queue_manager->selector_.SetQueuePriority(this,
                                                                    priority);
}

TaskQueueImpl::QueuePriority TaskQueueImpl::GetQueuePriority() const {
  size_t set_index = immediate_work_queue()->work_queue_set_index();
  DCHECK_EQ(set_index, delayed_work_queue()->work_queue_set_index());
  return static_cast<TaskQueue::QueuePriority>(set_index);
}

// static
const char* TaskQueueImpl::PumpPolicyToString(
    TaskQueue::PumpPolicy pump_policy) {
  switch (pump_policy) {
    case TaskQueue::PumpPolicy::AUTO:
      return "auto";
    case TaskQueue::PumpPolicy::AFTER_WAKEUP:
      return "after_wakeup";
    case TaskQueue::PumpPolicy::MANUAL:
      return "manual";
    default:
      NOTREACHED();
      return nullptr;
  }
}

// static
const char* TaskQueueImpl::WakeupPolicyToString(
    TaskQueue::WakeupPolicy wakeup_policy) {
  switch (wakeup_policy) {
    case TaskQueue::WakeupPolicy::CAN_WAKE_OTHER_QUEUES:
      return "can_wake_other_queues";
    case TaskQueue::WakeupPolicy::DONT_WAKE_OTHER_QUEUES:
      return "dont_wake_other_queues";
    default:
      NOTREACHED();
      return nullptr;
  }
}

// static
const char* TaskQueueImpl::PriorityToString(QueuePriority priority) {
  switch (priority) {
    case CONTROL_PRIORITY:
      return "control";
    case HIGH_PRIORITY:
      return "high";
    case NORMAL_PRIORITY:
      return "normal";
    case BEST_EFFORT_PRIORITY:
      return "best_effort";
    default:
      NOTREACHED();
      return nullptr;
  }
}

void TaskQueueImpl::AsValueInto(base::trace_event::TracedValue* state) const {
  base::AutoLock lock(any_thread_lock_);
  state->BeginDictionary();
  state->SetString("name", GetName());
  state->SetBoolean("enabled", main_thread_only().is_enabled);
  state->SetString("time_domain_name",
                   main_thread_only().time_domain->GetName());
  state->SetString("pump_policy",
                   PumpPolicyToString(any_thread().pump_policy));
  state->SetString("wakeup_policy", WakeupPolicyToString(wakeup_policy_));
  bool verbose_tracing_enabled = false;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
      disabled_by_default_verbose_tracing_category_, &verbose_tracing_enabled);
  state->SetInteger("immediate_incoming_queue_size",
                    any_thread().immediate_incoming_queue.size());
  state->SetInteger("delayed_incoming_queue_size",
                    main_thread_only().delayed_incoming_queue.size());
  state->SetInteger("immediate_work_queue_size",
                    main_thread_only().immediate_work_queue->Size());
  state->SetInteger("delayed_work_queue_size",
                    main_thread_only().delayed_work_queue->Size());
  if (!main_thread_only().delayed_incoming_queue.empty()) {
    base::TimeDelta delay_to_next_task =
        (main_thread_only().delayed_incoming_queue.top().delayed_run_time -
         main_thread_only().time_domain->CreateLazyNow().Now());
    state->SetDouble("delay_to_next_task_ms",
                     delay_to_next_task.InMillisecondsF());
  }
  if (verbose_tracing_enabled) {
    state->BeginArray("immediate_incoming_queue");
    QueueAsValueInto(any_thread().immediate_incoming_queue, state);
    state->EndArray();
    state->BeginArray("delayed_work_queue");
    main_thread_only().delayed_work_queue->AsValueInto(state);
    state->EndArray();
    state->BeginArray("immediate_work_queue");
    main_thread_only().immediate_work_queue->AsValueInto(state);
    state->EndArray();
    state->BeginArray("delayed_incoming_queue");
    QueueAsValueInto(main_thread_only().delayed_incoming_queue, state);
    state->EndArray();
  }
  state->SetString("priority", PriorityToString(GetQueuePriority()));
  state->EndDictionary();
}

void TaskQueueImpl::AddTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  main_thread_only().task_observers.AddObserver(task_observer);
}

void TaskQueueImpl::RemoveTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  main_thread_only().task_observers.RemoveObserver(task_observer);
}

void TaskQueueImpl::NotifyWillProcessTask(
    const base::PendingTask& pending_task) {
  DCHECK(should_notify_observers_);
  if (main_thread_only().blame_context)
    main_thread_only().blame_context->Enter();
  FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver,
                    main_thread_only().task_observers,
                    WillProcessTask(pending_task));
}

void TaskQueueImpl::NotifyDidProcessTask(
    const base::PendingTask& pending_task) {
  DCHECK(should_notify_observers_);
  FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver,
                    main_thread_only().task_observers,
                    DidProcessTask(pending_task));
  if (main_thread_only().blame_context)
    main_thread_only().blame_context->Leave();
}

void TaskQueueImpl::SetTimeDomain(TimeDomain* time_domain) {
  base::AutoLock lock(any_thread_lock_);
  DCHECK(time_domain);
  // NOTE this is similar to checking |any_thread().task_queue_manager|, but
  // the TaskQueueSelectorTests construct TaskQueueImpl directly with a null
  // task_queue_manager. Instead we check |any_thread().time_domain|, which is
  // another way of asserting that UnregisterTaskQueue has not been called.
  DCHECK(any_thread().time_domain);
  if (!any_thread().time_domain)
    return;
  DCHECK(main_thread_checker_.CalledOnValidThread());
  if (time_domain == main_thread_only().time_domain)
    return;

  main_thread_only().time_domain->MigrateQueue(this, time_domain);
  main_thread_only().time_domain = time_domain;
  any_thread().time_domain = time_domain;
}

TimeDomain* TaskQueueImpl::GetTimeDomain() const {
  if (base::PlatformThread::CurrentId() == thread_id_)
    return main_thread_only().time_domain;

  base::AutoLock lock(any_thread_lock_);
  return any_thread().time_domain;
}

void TaskQueueImpl::SetBlameContext(
    base::trace_event::BlameContext* blame_context) {
  main_thread_only().blame_context = blame_context;
}

// static
void TaskQueueImpl::QueueAsValueInto(const std::queue<Task>& queue,
                                     base::trace_event::TracedValue* state) {
  // Remove const to iterate over |queue| destructively. The contents are
  // restored from |visited| afterwards.
  std::queue<Task>* mutable_queue = const_cast<std::queue<Task>*>(&queue);
  std::queue<Task> visited;
  while (!mutable_queue->empty()) {
    TaskAsValueInto(mutable_queue->front(), state);
    visited.push(std::move(mutable_queue->front()));
    mutable_queue->pop();
  }
  *mutable_queue = std::move(visited);
}

// static
void TaskQueueImpl::QueueAsValueInto(const std::priority_queue<Task>& queue,
                                     base::trace_event::TracedValue* state) {
  // Remove const to iterate over |queue| destructively. The contents are
  // restored from |visited| afterwards.
  std::priority_queue<Task>* mutable_queue =
      const_cast<std::priority_queue<Task>*>(&queue);
  std::priority_queue<Task> visited;
  while (!mutable_queue->empty()) {
    TaskAsValueInto(mutable_queue->top(), state);
    visited.push(std::move(const_cast<Task&>(mutable_queue->top())));
    mutable_queue->pop();
  }
  *mutable_queue = std::move(visited);
}

// static
void TaskQueueImpl::TaskAsValueInto(const Task& task,
                                    base::trace_event::TracedValue* state) {
  state->BeginDictionary();
  state->SetString("posted_from", task.posted_from.ToString());
#ifndef NDEBUG
  if (task.enqueue_order_set())
    state->SetInteger("enqueue_order", task.enqueue_order());
#else
  state->SetInteger("enqueue_order", task.enqueue_order());
#endif
  state->SetInteger("sequence_num", task.sequence_num);
  state->SetBoolean("nestable", task.nestable);
  state->SetBoolean("is_high_res", task.is_high_res);
  state->SetDouble(
      "delayed_run_time",
      (task.delayed_run_time - base::TimeTicks()).InMicroseconds() / 1000.0L);
  state->EndDictionary();
}

}  // namespace internal
}  // namespace scheduler