| OLD | NEW |
| (Empty) |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/child/scheduler/task_queue_manager.h" | |
| 6 | |
| 7 #include <queue> | |
| 8 #include <set> | |
| 9 | |
| 10 #include "base/bind.h" | |
| 11 #include "base/trace_event/trace_event.h" | |
| 12 #include "base/trace_event/trace_event_argument.h" | |
| 13 #include "content/child/scheduler/nestable_single_thread_task_runner.h" | |
| 14 #include "content/child/scheduler/task_queue_selector.h" | |
| 15 #include "content/child/scheduler/time_source.h" | |
| 16 | |
| 17 namespace { | |
| 18 const int64_t kMaxTimeTicks = std::numeric_limits<int64>::max(); | |
| 19 } | |
| 20 | |
| 21 namespace content { | |
| 22 namespace internal { | |
| 23 | |
| 24 // Now() is somewhat expensive so it makes sense not to call Now() unless we | |
| 25 // really need to. | |
| 26 class LazyNow { | |
| 27 public: | |
| 28 explicit LazyNow(base::TimeTicks now) | |
| 29 : task_queue_manager_(nullptr), now_(now) { | |
| 30 DCHECK(!now.is_null()); | |
| 31 } | |
| 32 | |
| 33 explicit LazyNow(TaskQueueManager* task_queue_manager) | |
| 34 : task_queue_manager_(task_queue_manager) {} | |
| 35 | |
| 36 base::TimeTicks Now() { | |
| 37 if (now_.is_null()) | |
| 38 now_ = task_queue_manager_->Now(); | |
| 39 return now_; | |
| 40 } | |
| 41 | |
| 42 private: | |
| 43 TaskQueueManager* task_queue_manager_; // NOT OWNED | |
| 44 base::TimeTicks now_; | |
| 45 }; | |
| 46 | |
// A single task queue feeding the TaskQueueManager. Tasks may be posted from
// any thread onto |incoming_queue_| (under |lock_|); they are moved in
// batches onto the main-thread-only |work_queue_| when the queue is pumped,
// either automatically or manually depending on the pump policy.
class TaskQueue : public base::SingleThreadTaskRunner {
 public:
  TaskQueue(TaskQueueManager* task_queue_manager,
            const char* disabled_by_default_tracing_category);

  // base::SingleThreadTaskRunner implementation.
  bool RunsTasksOnCurrentThread() const override;
  bool PostDelayedTask(const tracked_objects::Location& from_here,
                       const base::Closure& task,
                       base::TimeDelta delay) override {
    return PostDelayedTaskImpl(from_here, task, delay, TaskType::NORMAL);
  }

  bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
                                  const base::Closure& task,
                                  base::TimeDelta delay) override {
    return PostDelayedTaskImpl(from_here, task, delay, TaskType::NON_NESTABLE);
  }

  // Returns true if both the work queue and the incoming queue are empty.
  // Delayed tasks that have not yet become runnable are not counted.
  bool IsQueueEmpty() const;

  void SetPumpPolicy(TaskQueueManager::PumpPolicy pump_policy);
  void PumpQueue();

  // If a delayed task is pending, writes its scheduled run time to
  // |next_pending_delayed_task| and returns true; otherwise returns false.
  bool NextPendingDelayedTaskRunTime(
      base::TimeTicks* next_pending_delayed_task);

  // Refills |work_queue_| from the incoming queue when the pump policy
  // allows. Returns true if the work queue is non-empty afterwards.
  bool UpdateWorkQueue(LazyNow* lazy_now,
                       const base::PendingTask* previous_task);
  base::PendingTask TakeTaskFromWorkQueue();

  // Severs the back-pointer to the manager. Called from the manager's
  // destructor because this queue is ref-counted and may outlive it.
  void WillDeleteTaskQueueManager();

  base::TaskQueue& work_queue() { return work_queue_; }

  void set_name(const char* name) { name_ = name; }

  void AsValueInto(base::trace_event::TracedValue* state) const;

 private:
  enum class TaskType {
    NORMAL,
    NON_NESTABLE,
  };

  ~TaskQueue() override;

  bool PostDelayedTaskImpl(const tracked_objects::Location& from_here,
                           const base::Closure& task,
                           base::TimeDelta delay,
                           TaskType task_type);

  // Delayed task posted to the underlying run loop, which locks |lock_| and
  // calls MoveReadyDelayedTasksToIncomingQueueLocked to process delayed tasks
  // that need to be run now.
  void MoveReadyDelayedTasksToIncomingQueue();

  // Enqueues any delayed tasks which should be run now on the incoming_queue_
  // and calls ScheduleDelayedWorkLocked to ensure future tasks are scheduled.
  // Must be called with |lock_| locked.
  void MoveReadyDelayedTasksToIncomingQueueLocked(LazyNow* lazy_now);

  // Posts MoveReadyDelayedTasksToIncomingQueue if there isn't already a task
  // posted on the underlying runloop for the next task's scheduled run time.
  void ScheduleDelayedWorkLocked(LazyNow* lazy_now);

  void PumpQueueLocked();
  bool TaskIsOlderThanQueuedTasks(const base::PendingTask* task);
  bool ShouldAutoPumpQueueLocked(const base::PendingTask* previous_task);
  void EnqueueTaskLocked(const base::PendingTask& pending_task);

  void TraceQueueSize(bool is_locked) const;
  static const char* PumpPolicyToString(
      TaskQueueManager::PumpPolicy pump_policy);
  static void QueueAsValueInto(const base::TaskQueue& queue,
                               base::trace_event::TracedValue* state);
  static void QueueAsValueInto(const base::DelayedTaskQueue& queue,
                               base::trace_event::TracedValue* state);
  static void TaskAsValueInto(const base::PendingTask& task,
                              base::trace_event::TracedValue* state);

  // This lock protects all members except the work queue and the
  // main_thread_checker_.
  mutable base::Lock lock_;
  base::PlatformThreadId thread_id_;
  TaskQueueManager* task_queue_manager_;  // NOT OWNED; null after manager dies.
  base::TaskQueue incoming_queue_;
  TaskQueueManager::PumpPolicy pump_policy_;
  const char* name_;  // May be null until set_name() is called.
  const char* disabled_by_default_tracing_category_;
  base::DelayedTaskQueue delayed_task_queue_;
  // Run times for which a MoveReadyDelayedTasksToIncomingQueue "kick" has
  // already been posted; prevents duplicate wake-ups for the same time.
  std::set<base::TimeTicks> in_flight_kick_delayed_tasks_;

  base::ThreadChecker main_thread_checker_;
  base::TaskQueue work_queue_;

  DISALLOW_COPY_AND_ASSIGN(TaskQueue);
};
| 145 | |
// Queues start in AUTO pump mode and remember the thread they were created
// on for RunsTasksOnCurrentThread().
TaskQueue::TaskQueue(TaskQueueManager* task_queue_manager,
                     const char* disabled_by_default_tracing_category)
    : thread_id_(base::PlatformThread::CurrentId()),
      task_queue_manager_(task_queue_manager),
      pump_policy_(TaskQueueManager::PumpPolicy::AUTO),
      name_(nullptr),
      disabled_by_default_tracing_category_(
          disabled_by_default_tracing_category) {
}
| 155 | |
// Private: the queue is ref-counted and deleted via Release().
TaskQueue::~TaskQueue() {
}
| 158 | |
void TaskQueue::WillDeleteTaskQueueManager() {
  base::AutoLock lock(lock_);
  task_queue_manager_ = nullptr;
  // Replace the queues with empty ones so any queued closures (and whatever
  // they retain) are released now rather than when the last ref to this
  // queue goes away.
  delayed_task_queue_ = base::DelayedTaskQueue();
  incoming_queue_ = base::TaskQueue();
  work_queue_ = base::TaskQueue();
}
| 166 | |
| 167 bool TaskQueue::RunsTasksOnCurrentThread() const { | |
| 168 base::AutoLock lock(lock_); | |
| 169 return base::PlatformThread::CurrentId() == thread_id_; | |
| 170 } | |
| 171 | |
// Shared implementation for the nestable and non-nestable post entry points.
// Returns false only if the TaskQueueManager has already been deleted.
bool TaskQueue::PostDelayedTaskImpl(const tracked_objects::Location& from_here,
                                    const base::Closure& task,
                                    base::TimeDelta delay,
                                    TaskType task_type) {
  base::AutoLock lock(lock_);
  if (!task_queue_manager_)
    return false;

  base::PendingTask pending_task(from_here, task, base::TimeTicks(),
                                 task_type != TaskType::NON_NESTABLE);
  // Assigns the sequence number and notifies the task annotator.
  task_queue_manager_->DidQueueTask(&pending_task);

  if (delay > base::TimeDelta()) {
    base::TimeTicks now = task_queue_manager_->Now();
    pending_task.delayed_run_time = now + delay;
    delayed_task_queue_.push(pending_task);
    TraceQueueSize(true);
    // If we changed the topmost task, then it is time to reschedule.
    if (delayed_task_queue_.top().task.Equals(pending_task.task)) {
      LazyNow lazy_now(now);
      ScheduleDelayedWorkLocked(&lazy_now);
    }
    return true;
  }
  // No delay: goes straight onto the incoming queue.
  EnqueueTaskLocked(pending_task);
  return true;
}
| 199 | |
// Runs on the main thread as the "kick" task scheduled by
// ScheduleDelayedWorkLocked; drains newly-ready delayed tasks.
void TaskQueue::MoveReadyDelayedTasksToIncomingQueue() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  base::AutoLock lock(lock_);
  // The manager may have been deleted while this task was in flight.
  if (!task_queue_manager_)
    return;

  LazyNow lazy_now(task_queue_manager_);
  MoveReadyDelayedTasksToIncomingQueueLocked(&lazy_now);
}
| 209 | |
void TaskQueue::MoveReadyDelayedTasksToIncomingQueueLocked(LazyNow* lazy_now) {
  lock_.AssertAcquired();
  // Enqueue all delayed tasks that should be running now.
  while (!delayed_task_queue_.empty() &&
         delayed_task_queue_.top().delayed_run_time <= lazy_now->Now()) {
    // The kick for this run time has served its purpose; forget it so a
    // future task at the same time can schedule a new one.
    in_flight_kick_delayed_tasks_.erase(
        delayed_task_queue_.top().delayed_run_time);
    EnqueueTaskLocked(delayed_task_queue_.top());
    delayed_task_queue_.pop();
  }
  TraceQueueSize(true);
  // Any remaining delayed tasks need a future wake-up.
  ScheduleDelayedWorkLocked(lazy_now);
}
| 223 | |
void TaskQueue::ScheduleDelayedWorkLocked(LazyNow* lazy_now) {
  lock_.AssertAcquired();
  // Any remaining tasks are in the future, so queue a task to kick them.
  if (!delayed_task_queue_.empty()) {
    base::TimeTicks next_run_time = delayed_task_queue_.top().delayed_run_time;
    DCHECK_GT(next_run_time, lazy_now->Now());
    // Make sure we don't have more than one
    // MoveReadyDelayedTasksToIncomingQueue posted for a particular scheduled
    // run time (note it's fine to have multiple ones in flight for distinct
    // run times).
    if (in_flight_kick_delayed_tasks_.find(next_run_time) ==
        in_flight_kick_delayed_tasks_.end()) {
      in_flight_kick_delayed_tasks_.insert(next_run_time);
      base::TimeDelta delay = next_run_time - lazy_now->Now();
      // Refcounted |this| keeps the queue alive until the kick runs.
      task_queue_manager_->PostDelayedTask(
          FROM_HERE,
          Bind(&TaskQueue::MoveReadyDelayedTasksToIncomingQueue, this), delay);
    }
  }
}
| 244 | |
bool TaskQueue::IsQueueEmpty() const {
  // |work_queue_| is deliberately not protected by |lock_| (see member
  // comments), so check it before taking the lock for the incoming queue.
  if (!work_queue_.empty())
    return false;

  {
    base::AutoLock lock(lock_);
    return incoming_queue_.empty();
  }
}
| 254 | |
// Returns true if |task| was posted before the oldest task currently waiting
// in the incoming queue; used to decide whether an AFTER_WAKEUP queue should
// be pumped.
bool TaskQueue::TaskIsOlderThanQueuedTasks(const base::PendingTask* task) {
  lock_.AssertAcquired();
  // A null task is passed when UpdateQueue is called before any task is run.
  // In this case we don't want to pump an after_wakeup queue, so return true
  // here.
  if (!task)
    return true;

  // Return false if there are no tasks in the incoming queue.
  if (incoming_queue_.empty())
    return false;

  base::PendingTask oldest_queued_task = incoming_queue_.front();
  DCHECK(oldest_queued_task.delayed_run_time.is_null());
  DCHECK(task->delayed_run_time.is_null());

  // Note: the comparison is correct due to the fact that the PendingTask
  // operator inverts its comparison operation in order to work well in a heap
  // based priority queue.
  return oldest_queued_task < *task;
}
| 276 | |
| 277 bool TaskQueue::ShouldAutoPumpQueueLocked( | |
| 278 const base::PendingTask* previous_task) { | |
| 279 lock_.AssertAcquired(); | |
| 280 if (pump_policy_ == TaskQueueManager::PumpPolicy::MANUAL) | |
| 281 return false; | |
| 282 if (pump_policy_ == TaskQueueManager::PumpPolicy::AFTER_WAKEUP && | |
| 283 TaskIsOlderThanQueuedTasks(previous_task)) | |
| 284 return false; | |
| 285 if (incoming_queue_.empty()) | |
| 286 return false; | |
| 287 return true; | |
| 288 } | |
| 289 | |
| 290 bool TaskQueue::NextPendingDelayedTaskRunTime( | |
| 291 base::TimeTicks* next_pending_delayed_task) { | |
| 292 base::AutoLock lock(lock_); | |
| 293 if (delayed_task_queue_.empty()) | |
| 294 return false; | |
| 295 *next_pending_delayed_task = delayed_task_queue_.top().delayed_run_time; | |
| 296 return true; | |
| 297 } | |
| 298 | |
bool TaskQueue::UpdateWorkQueue(LazyNow* lazy_now,
                                const base::PendingTask* previous_task) {
  // Nothing to do while the previous batch is still being worked through.
  if (!work_queue_.empty())
    return true;

  {
    base::AutoLock lock(lock_);
    if (!ShouldAutoPumpQueueLocked(previous_task))
      return false;
    // Pull any now-ready delayed tasks in before swapping, so they are
    // included in this batch.
    MoveReadyDelayedTasksToIncomingQueueLocked(lazy_now);
    work_queue_.Swap(&incoming_queue_);
    TraceQueueSize(true);
    return true;
  }
}
| 314 | |
| 315 base::PendingTask TaskQueue::TakeTaskFromWorkQueue() { | |
| 316 base::PendingTask pending_task = work_queue_.front(); | |
| 317 work_queue_.pop(); | |
| 318 TraceQueueSize(false); | |
| 319 return pending_task; | |
| 320 } | |
| 321 | |
// Emits a trace counter with the combined size of all three queues.
// |is_locked| tells us whether the caller already holds |lock_|; if not we
// must take it ourselves, since the incoming and delayed queues are read.
void TaskQueue::TraceQueueSize(bool is_locked) const {
  bool is_tracing;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(disabled_by_default_tracing_category_,
                                     &is_tracing);
  // Unnamed queues are not traced.
  if (!is_tracing || !name_)
    return;
  if (!is_locked)
    lock_.Acquire();
  else
    lock_.AssertAcquired();
  TRACE_COUNTER1(
      disabled_by_default_tracing_category_, name_,
      incoming_queue_.size() + work_queue_.size() + delayed_task_queue_.size());
  if (!is_locked)
    lock_.Release();
}
| 338 | |
void TaskQueue::EnqueueTaskLocked(const base::PendingTask& pending_task) {
  lock_.AssertAcquired();
  if (!task_queue_manager_)
    return;
  // For auto-pumped queues, an empty->non-empty transition is what creates
  // new work, so that's when a DoWork needs to be scheduled.
  if (pump_policy_ == TaskQueueManager::PumpPolicy::AUTO &&
      incoming_queue_.empty())
    task_queue_manager_->MaybePostDoWorkOnMainRunner();
  incoming_queue_.push(pending_task);

  if (!pending_task.delayed_run_time.is_null()) {
    // Clear the delayed run time because we've already applied the delay
    // before getting here.
    incoming_queue_.back().delayed_run_time = base::TimeTicks();
  }
  TraceQueueSize(true);
}
| 355 | |
| 356 void TaskQueue::SetPumpPolicy(TaskQueueManager::PumpPolicy pump_policy) { | |
| 357 base::AutoLock lock(lock_); | |
| 358 if (pump_policy == TaskQueueManager::PumpPolicy::AUTO && | |
| 359 pump_policy_ != TaskQueueManager::PumpPolicy::AUTO) { | |
| 360 PumpQueueLocked(); | |
| 361 } | |
| 362 pump_policy_ = pump_policy; | |
| 363 } | |
| 364 | |
void TaskQueue::PumpQueueLocked() {
  lock_.AssertAcquired();
  // Ready delayed tasks are folded into the incoming queue first so they get
  // pumped along with everything else.
  if (task_queue_manager_) {
    LazyNow lazy_now(task_queue_manager_);
    MoveReadyDelayedTasksToIncomingQueueLocked(&lazy_now);
  }
  while (!incoming_queue_.empty()) {
    work_queue_.push(incoming_queue_.front());
    incoming_queue_.pop();
  }
  // NOTE(review): this call is not guarded by a |task_queue_manager_| null
  // check; after WillDeleteTaskQueueManager all queues are emptied, which is
  // presumably what keeps this branch from being reached with a null manager.
  if (!work_queue_.empty())
    task_queue_manager_->MaybePostDoWorkOnMainRunner();
}
| 378 | |
// Public entry point for manual pumping; just takes the lock and delegates.
void TaskQueue::PumpQueue() {
  base::AutoLock lock(lock_);
  PumpQueueLocked();
}
| 383 | |
// Serializes this queue's state (name, pump policy and the contents of all
// three queues) into |state| for tracing.
void TaskQueue::AsValueInto(base::trace_event::TracedValue* state) const {
  base::AutoLock lock(lock_);
  state->BeginDictionary();
  if (name_)
    state->SetString("name", name_);
  state->SetString("pump_policy", PumpPolicyToString(pump_policy_));
  state->BeginArray("incoming_queue");
  QueueAsValueInto(incoming_queue_, state);
  state->EndArray();
  state->BeginArray("work_queue");
  QueueAsValueInto(work_queue_, state);
  state->EndArray();
  state->BeginArray("delayed_task_queue");
  QueueAsValueInto(delayed_task_queue_, state);
  state->EndArray();
  state->EndDictionary();
}
| 401 | |
| 402 // static | |
| 403 const char* TaskQueue::PumpPolicyToString( | |
| 404 TaskQueueManager::PumpPolicy pump_policy) { | |
| 405 switch (pump_policy) { | |
| 406 case TaskQueueManager::PumpPolicy::AUTO: | |
| 407 return "auto"; | |
| 408 case TaskQueueManager::PumpPolicy::AFTER_WAKEUP: | |
| 409 return "after_wakeup"; | |
| 410 case TaskQueueManager::PumpPolicy::MANUAL: | |
| 411 return "manual"; | |
| 412 default: | |
| 413 NOTREACHED(); | |
| 414 return nullptr; | |
| 415 } | |
| 416 } | |
| 417 | |
| 418 // static | |
| 419 void TaskQueue::QueueAsValueInto(const base::TaskQueue& queue, | |
| 420 base::trace_event::TracedValue* state) { | |
| 421 base::TaskQueue queue_copy(queue); | |
| 422 while (!queue_copy.empty()) { | |
| 423 TaskAsValueInto(queue_copy.front(), state); | |
| 424 queue_copy.pop(); | |
| 425 } | |
| 426 } | |
| 427 | |
| 428 // static | |
| 429 void TaskQueue::QueueAsValueInto(const base::DelayedTaskQueue& queue, | |
| 430 base::trace_event::TracedValue* state) { | |
| 431 base::DelayedTaskQueue queue_copy(queue); | |
| 432 while (!queue_copy.empty()) { | |
| 433 TaskAsValueInto(queue_copy.top(), state); | |
| 434 queue_copy.pop(); | |
| 435 } | |
| 436 } | |
| 437 | |
// static
// Serializes a single pending task (provenance, ordering and scheduling
// metadata) into |state| for tracing.
void TaskQueue::TaskAsValueInto(const base::PendingTask& task,
                                base::trace_event::TracedValue* state) {
  state->BeginDictionary();
  state->SetString("posted_from", task.posted_from.ToString());
  state->SetInteger("sequence_num", task.sequence_num);
  state->SetBoolean("nestable", task.nestable);
  state->SetBoolean("is_high_res", task.is_high_res);
  // Expressed as fractional milliseconds since the TimeTicks epoch.
  state->SetDouble(
      "delayed_run_time",
      (task.delayed_run_time - base::TimeTicks()).InMicroseconds() / 1000.0L);
  state->EndDictionary();
}
| 451 | |
| 452 } // namespace internal | |
| 453 | |
// Creates |task_queue_count| queues, registers their work queues with the
// selector, and pre-binds the DoWork closures used to schedule work from the
// main thread and from other threads.
TaskQueueManager::TaskQueueManager(
    size_t task_queue_count,
    scoped_refptr<NestableSingleThreadTaskRunner> main_task_runner,
    TaskQueueSelector* selector,
    const char* disabled_by_default_tracing_category)
    : main_task_runner_(main_task_runner),
      selector_(selector),
      task_was_run_bitmap_(0),
      pending_dowork_count_(0),
      work_batch_size_(1),
      time_source_(new TimeSource),
      disabled_by_default_tracing_category_(
          disabled_by_default_tracing_category),
      deletion_sentinel_(new DeletionSentinel()),
      weak_factory_(this) {
  DCHECK(main_task_runner->RunsTasksOnCurrentThread());
  // Each queue gets one bit in |task_was_run_bitmap_|.
  DCHECK_LE(task_queue_count, sizeof(task_was_run_bitmap_) * CHAR_BIT)
      << "You need a bigger int for task_was_run_bitmap_";
  TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category,
                                     "TaskQueueManager", this);

  for (size_t i = 0; i < task_queue_count; i++) {
    scoped_refptr<internal::TaskQueue> queue(make_scoped_refptr(
        new internal::TaskQueue(this, disabled_by_default_tracing_category)));
    queues_.push_back(queue);
  }

  std::vector<const base::TaskQueue*> work_queues;
  for (const auto& queue : queues_)
    work_queues.push_back(&queue->work_queue());
  selector_->RegisterWorkQueues(work_queues);
  selector_->SetTaskQueueSelectorObserver(this);

  // Bound once here to avoid re-binding on every MaybePostDoWorkOnMainRunner.
  do_work_from_main_thread_closure_ =
      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true);
  do_work_from_other_thread_closure_ =
      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false);
}
| 492 | |
TaskQueueManager::~TaskQueueManager() {
  TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_,
                                     "TaskQueueManager", this);
  // The queues are ref-counted and may outlive us; tell each one to drop its
  // back-pointer and release its pending tasks.
  for (auto& queue : queues_)
    queue->WillDeleteTaskQueueManager();
  selector_->SetTaskQueueSelectorObserver(nullptr);
}
| 500 | |
// Returns the queue at |queue_index|; the index must be in range.
internal::TaskQueue* TaskQueueManager::Queue(size_t queue_index) const {
  DCHECK_LT(queue_index, queues_.size());
  return queues_[queue_index].get();
}
| 505 | |
// Exposes the queue as a plain SingleThreadTaskRunner (TaskQueue derives
// from it), hiding the internal type from callers.
scoped_refptr<base::SingleThreadTaskRunner>
TaskQueueManager::TaskRunnerForQueue(size_t queue_index) const {
  return Queue(queue_index);
}
| 510 | |
| 511 bool TaskQueueManager::IsQueueEmpty(size_t queue_index) const { | |
| 512 internal::TaskQueue* queue = Queue(queue_index); | |
| 513 return queue->IsQueueEmpty(); | |
| 514 } | |
| 515 | |
// Returns the earliest scheduled run time across all queues' delayed tasks,
// or a null TimeTicks if no delayed task is pending anywhere.
base::TimeTicks TaskQueueManager::NextPendingDelayedTaskRunTime() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  bool found_pending_task = false;
  // Start at the sentinel maximum so std::min picks any real run time.
  base::TimeTicks next_pending_delayed_task(
      base::TimeTicks::FromInternalValue(kMaxTimeTicks));
  for (auto& queue : queues_) {
    base::TimeTicks queues_next_pending_delayed_task;
    if (queue->NextPendingDelayedTaskRunTime(
            &queues_next_pending_delayed_task)) {
      found_pending_task = true;
      next_pending_delayed_task =
          std::min(next_pending_delayed_task, queues_next_pending_delayed_task);
    }
  }

  if (!found_pending_task)
    return base::TimeTicks();

  DCHECK_NE(next_pending_delayed_task,
            base::TimeTicks::FromInternalValue(kMaxTimeTicks));
  return next_pending_delayed_task;
}
| 538 | |
| 539 void TaskQueueManager::SetPumpPolicy(size_t queue_index, | |
| 540 PumpPolicy pump_policy) { | |
| 541 DCHECK(main_thread_checker_.CalledOnValidThread()); | |
| 542 internal::TaskQueue* queue = Queue(queue_index); | |
| 543 queue->SetPumpPolicy(pump_policy); | |
| 544 } | |
| 545 | |
| 546 void TaskQueueManager::PumpQueue(size_t queue_index) { | |
| 547 DCHECK(main_thread_checker_.CalledOnValidThread()); | |
| 548 internal::TaskQueue* queue = Queue(queue_index); | |
| 549 queue->PumpQueue(); | |
| 550 } | |
| 551 | |
// Asks every queue to refill its work queue. Returns true if any queue ended
// up with runnable work. |previous_task| is forwarded so AFTER_WAKEUP queues
// can decide whether the wake-up counts for them.
bool TaskQueueManager::UpdateWorkQueues(
    const base::PendingTask* previous_task) {
  // TODO(skyostil): This is not efficient when the number of queues grows very
  // large due to the number of locks taken. Consider optimizing when we get
  // there.
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::LazyNow lazy_now(this);
  bool has_work = false;
  for (auto& queue : queues_) {
    has_work |= queue->UpdateWorkQueue(&lazy_now, previous_task);
    if (!queue->work_queue().empty()) {
      // Currently we should not be getting tasks with delayed run times in any
      // of the work queues.
      DCHECK(queue->work_queue().front().delayed_run_time.is_null());
    }
  }
  return has_work;
}
| 570 | |
// Schedules a DoWork call on the main task runner. May be called from any
// thread; only posts from the main thread are deduplicated.
void TaskQueueManager::MaybePostDoWorkOnMainRunner() {
  bool on_main_thread = main_task_runner_->BelongsToCurrentThread();
  if (on_main_thread) {
    // We only want one pending DoWork posted from the main thread, or we risk
    // an explosion of pending DoWorks which could starve out everything else.
    if (pending_dowork_count_ > 0) {
      return;
    }
    pending_dowork_count_++;
    main_task_runner_->PostTask(FROM_HERE, do_work_from_main_thread_closure_);
  } else {
    // Cross-thread posts are not counted; DoWork distinguishes the two via
    // its bool argument.
    main_task_runner_->PostTask(FROM_HERE, do_work_from_other_thread_closure_);
  }
}
| 585 | |
// Runs up to |work_batch_size_| tasks chosen by the selector, then returns.
// |posted_from_main_thread| mirrors which closure posted this call so the
// main-thread dedup counter can be balanced.
void TaskQueueManager::DoWork(bool posted_from_main_thread) {
  if (posted_from_main_thread) {
    pending_dowork_count_--;
    DCHECK_GE(pending_dowork_count_, 0);
  }
  DCHECK(main_thread_checker_.CalledOnValidThread());

  // Pass nullptr to UpdateWorkQueues here to prevent waking up a
  // pump-after-wakeup queue.
  if (!UpdateWorkQueues(nullptr))
    return;

  base::PendingTask previous_task((tracked_objects::Location()),
                                  (base::Closure()));
  for (int i = 0; i < work_batch_size_; i++) {
    size_t queue_index;
    if (!SelectWorkQueueToService(&queue_index))
      return;
    // Note that this function won't post another call to DoWork if one is
    // already pending, so it is safe to call it in a loop.
    MaybePostDoWorkOnMainRunner();

    if (ProcessTaskFromWorkQueue(queue_index, i > 0, &previous_task))
      return;  // The TaskQueueManager got deleted, we must bail out.

    // |previous_task| lets AFTER_WAKEUP queues judge this wake-up.
    if (!UpdateWorkQueues(&previous_task))
      return;
  }
}
| 615 | |
// Delegates queue selection to the selector and snapshots the manager state
// (including the selection result) into the trace log.
bool TaskQueueManager::SelectWorkQueueToService(size_t* out_queue_index) {
  bool should_run = selector_->SelectWorkQueueToService(out_queue_index);
  TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
      disabled_by_default_tracing_category_, "TaskQueueManager", this,
      AsValueWithSelectorResult(should_run, *out_queue_index));
  return should_run;
}
| 623 | |
// Stamps |pending_task| with the next global sequence number and notifies
// the task annotator so the post shows up in traces.
void TaskQueueManager::DidQueueTask(base::PendingTask* pending_task) {
  pending_task->sequence_num = task_sequence_num_.GetNext();
  task_annotator_.DidQueueTask("TaskQueueManager::PostTask", *pending_task);
}
| 628 | |
// Runs (or defers) one task from the given queue. Returns true if running
// the task deleted this TaskQueueManager, in which case the caller must not
// touch any member state.
bool TaskQueueManager::ProcessTaskFromWorkQueue(
    size_t queue_index,
    bool has_previous_task,
    base::PendingTask* previous_task) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  // Holding an extra ref on the sentinel lets us detect deletion below.
  scoped_refptr<DeletionSentinel> protect(deletion_sentinel_);
  internal::TaskQueue* queue = Queue(queue_index);
  base::PendingTask pending_task = queue->TakeTaskFromWorkQueue();
  task_was_run_bitmap_ |= UINT64_C(1) << queue_index;
  if (!pending_task.nestable && main_task_runner_->IsNested()) {
    // Defer non-nestable work to the main task runner. NOTE these tasks can be
    // arbitrarily delayed so the additional delay should not be a problem.
    main_task_runner_->PostNonNestableTask(pending_task.posted_from,
                                           pending_task.task);
  } else {
    // Suppress "will" task observer notifications for the first and "did"
    // notifications for the last task in the batch to avoid duplicate
    // notifications.
    if (has_previous_task) {
      FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
                        DidProcessTask(*previous_task));
      FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
                        WillProcessTask(pending_task));
    }
    task_annotator_.RunTask("TaskQueueManager::PostTask",
                            "TaskQueueManager::RunTask", pending_task);

    // Detect if the TaskQueueManager just got deleted. If this happens we must
    // not access any member variables after this point.
    if (protect->HasOneRef())
      return true;

    // Drop the closure before saving: |previous_task| is only needed for
    // ordering/observer bookkeeping, not for re-running the task.
    pending_task.task.Reset();
    *previous_task = pending_task;
  }
  return false;
}
| 666 | |
// Forwards the thread-affinity check to the underlying main task runner.
bool TaskQueueManager::RunsTasksOnCurrentThread() const {
  return main_task_runner_->RunsTasksOnCurrentThread();
}
| 670 | |
| 671 bool TaskQueueManager::PostDelayedTask( | |
| 672 const tracked_objects::Location& from_here, | |
| 673 const base::Closure& task, | |
| 674 base::TimeDelta delay) { | |
| 675 DCHECK(delay > base::TimeDelta()); | |
| 676 return main_task_runner_->PostDelayedTask(from_here, task, delay); | |
| 677 } | |
| 678 | |
| 679 void TaskQueueManager::SetQueueName(size_t queue_index, const char* name) { | |
| 680 DCHECK(main_thread_checker_.CalledOnValidThread()); | |
| 681 internal::TaskQueue* queue = Queue(queue_index); | |
| 682 queue->set_name(name); | |
| 683 } | |
| 684 | |
// Sets how many tasks DoWork may run per invocation; must be at least 1.
void TaskQueueManager::SetWorkBatchSize(int work_batch_size) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK_GE(work_batch_size, 1);
  work_batch_size_ = work_batch_size;
}
| 690 | |
// Registers |task_observer| both with the message loop (for tasks run there)
// and with our own list (for batched tasks run directly by DoWork).
void TaskQueueManager::AddTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  base::MessageLoop::current()->AddTaskObserver(task_observer);
  task_observers_.AddObserver(task_observer);
}
| 697 | |
// Unregisters |task_observer| from both places AddTaskObserver added it.
void TaskQueueManager::RemoveTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  base::MessageLoop::current()->RemoveTaskObserver(task_observer);
  task_observers_.RemoveObserver(task_observer);
}
| 704 | |
// Replaces the clock used by Now(); test-only hook.
void TaskQueueManager::SetTimeSourceForTesting(
    scoped_ptr<TimeSource> time_source) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  time_source_ = time_source.Pass();
}
| 710 | |
| 711 uint64 TaskQueueManager::GetAndClearTaskWasRunOnQueueBitmap() { | |
| 712 uint64 bitmap = task_was_run_bitmap_; | |
| 713 task_was_run_bitmap_ = 0; | |
| 714 return bitmap; | |
| 715 } | |
| 716 | |
// Reads the current time from the (possibly test-injected) time source.
base::TimeTicks TaskQueueManager::Now() const {
  return time_source_->Now();
}
| 720 | |
// Builds a trace snapshot of all queues, the selector state, and (when a
// queue was selected) the index the selector chose.
scoped_refptr<base::trace_event::ConvertableToTraceFormat>
TaskQueueManager::AsValueWithSelectorResult(bool should_run,
                                            size_t selected_queue) const {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();
  state->BeginArray("queues");
  for (auto& queue : queues_)
    queue->AsValueInto(state.get());
  state->EndArray();
  state->BeginDictionary("selector");
  selector_->AsValueInto(state.get());
  state->EndDictionary();
  if (should_run)
    state->SetInteger("selected_queue", selected_queue);
  return state;
}
| 738 | |
// TaskQueueSelector observer callback: a queue was (re-)enabled, so there
// may now be runnable work that needs a DoWork scheduled.
void TaskQueueManager::OnTaskQueueEnabled() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  MaybePostDoWorkOnMainRunner();
}
| 743 | |
| 744 } // namespace content | |
| OLD | NEW |