Index: content/renderer/scheduler/task_queue_manager.cc
diff --git a/content/renderer/scheduler/task_queue_manager.cc b/content/renderer/scheduler/task_queue_manager.cc
new file mode 100644
index 0000000000000000000000000000000000000000..40f6d8c84c425a343c87668cf8c2661084713581
--- /dev/null
+++ b/content/renderer/scheduler/task_queue_manager.cc
@@ -0,0 +1,257 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/scheduler/task_queue_manager.h"
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "content/renderer/scheduler/task_queue_selector.h"
+
+namespace content {
+namespace internal {
+
+class TaskRunner : public base::SingleThreadTaskRunner {
+ public:
+  TaskRunner(base::WeakPtr<TaskQueueManager> task_queue_manager,
+             size_t queue_index);
+
+  // base::SingleThreadTaskRunner implementation.
+  bool RunsTasksOnCurrentThread() const override;
+  bool PostDelayedTask(const tracked_objects::Location& from_here,
+                       const base::Closure& task,
+                       base::TimeDelta delay) override;
+  bool PostNonNestableDelayedTask(
+      const tracked_objects::Location& from_here,
+      const base::Closure& task,
+      base::TimeDelta delay) override;
+
+ private:
+  ~TaskRunner() override;
+
+  base::WeakPtr<TaskQueueManager> task_queue_manager_;
cpu_(ooo_6.6-7.5)
2014/10/20 20:00:10
why is this weak?
Sami
2014/10/21 10:32:54
It's needed to avoid posting new tasks after the scheduler has been destroyed.
cpu_(ooo_6.6-7.5)
2014/10/21 19:46:07
Should the QM take a reference instead?
Sami
2014/10/22 12:09:08
I'm not sure I got that -- QM already holds a reference to each TaskRunner.

+  const size_t queue_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskRunner);
+};
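
A minimal sketch of the lifetime problem the weak pointer in the thread above guards against. It assumes the manager lives in a scoped_ptr and that a DoSomething callback exists; both are hypothetical:

  // The returned runner can outlive the manager, e.g. on another thread.
  scoped_refptr<base::SingleThreadTaskRunner> runner =
      manager->TaskRunnerForQueue(0);
  manager.reset();  // TaskQueueManager destroyed; weak pointers invalidated.
  // With a strong reference this post would keep dead scheduler machinery
  // alive; with the weak pointer it is simply dropped and returns false.
  bool posted = runner->PostTask(FROM_HERE, base::Bind(&DoSomething));
  DCHECK(!posted);
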
+
+TaskRunner::TaskRunner(base::WeakPtr<TaskQueueManager> task_queue_manager,
+                       size_t queue_index)
+    : task_queue_manager_(task_queue_manager), queue_index_(queue_index) {
+}
+
+TaskRunner::~TaskRunner() {
+}
+
+bool TaskRunner::RunsTasksOnCurrentThread() const {
+  if (!task_queue_manager_)
+    return false;
+  return task_queue_manager_->RunsTasksOnCurrentThread();
+}
+
+bool TaskRunner::PostDelayedTask(const tracked_objects::Location& from_here,
+                                 const base::Closure& task,
+                                 base::TimeDelta delay) {
+  if (!task_queue_manager_)
+    return false;
+  return task_queue_manager_->PostDelayedTask(
+      queue_index_, from_here, task, delay);
+}
+
+bool TaskRunner::PostNonNestableDelayedTask(
+    const tracked_objects::Location& from_here,
+    const base::Closure& task,
+    base::TimeDelta delay) {
+  if (!task_queue_manager_)
+    return false;
+  return task_queue_manager_->PostNonNestableDelayedTask(
+      queue_index_, from_here, task, delay);
+}
cpu_(ooo_6.6-7.5)
2014/10/20 20:00:10
It would seem at first blush that the only value the TaskRunner adds is remembering the queue index.
Sami
2014/10/21 10:32:54
The reason we went for the TaskRunner was that all existing code can keep posting tasks through the standard SingleThreadTaskRunner interface.
cpu_(ooo_6.6-7.5)
2014/10/21 19:46:07
Acknowledged.

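The reuse argument from the thread can be made concrete with a sketch; StartLoader and kLoadingQueueIndex are hypothetical names, not part of this patch:

  // Existing function that knows nothing about TaskQueueManager.
  void StartLoader(scoped_refptr<base::SingleThreadTaskRunner> task_runner);

  // Call site: hand it the runner for a specific queue, and its posts land
  // on that prioritized queue through the standard interface.
  StartLoader(manager->TaskRunnerForQueue(kLoadingQueueIndex));
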
+
+class TaskQueue {
+ public:
cpu_(ooo_6.6-7.5)
2014/10/21 19:46:07
this should be a struct.
Sami
2014/10/22 12:09:07
I turned it into a class on Jochen's suggestion, but I don't feel strongly either way.

+  TaskQueue() : auto_pump(true) {}
+  ~TaskQueue() {}
+
+  scoped_refptr<TaskRunner> task_runner;
+
+  base::Lock incoming_queue_lock;
+  base::TaskQueue incoming_queue;
+
+  bool auto_pump;
+  base::TaskQueue work_queue;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskQueue);
+};
+
+}  // namespace internal
+
+TaskQueueManager::TaskQueueManager(
+    size_t task_queue_count,
+    scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
+    TaskQueueSelector* selector)
+    : main_task_runner_(main_task_runner),
+      selector_(selector),
+      weak_factory_(this) {
+  DCHECK(main_task_runner->RunsTasksOnCurrentThread());
+
+  for (size_t i = 0; i < task_queue_count; i++) {
+    scoped_ptr<internal::TaskQueue> queue(new internal::TaskQueue());
+    queue->task_runner = make_scoped_refptr(
+        new internal::TaskRunner(weak_factory_.GetWeakPtr(), i));
+    queues_.push_back(queue.release());
+  }
+
+  std::vector<const base::TaskQueue*> work_queues;
+  for (size_t i = 0; i < queues_.size(); i++)
cpu_(ooo_6.6-7.5)
2014/10/21 19:46:07
use range-for; that has been approved, right?
Sami
2014/10/22 12:09:08
Ah, that's true. Thanks, done.

+    work_queues.push_back(&queues_[i]->work_queue);
+  selector_->RegisterWorkQueues(work_queues);
+}
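
The registration loop above, rewritten with range-for as agreed in the thread; a sketch of the follow-up change rather than what this patch set contains:

  std::vector<const base::TaskQueue*> work_queues;
  // ScopedVector iteration yields raw pointers to the owned queues.
  for (const internal::TaskQueue* queue : queues_)
    work_queues.push_back(&queue->work_queue);
  selector_->RegisterWorkQueues(work_queues);
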
+
+TaskQueueManager::~TaskQueueManager() {
+}
+
+internal::TaskQueue* TaskQueueManager::Queue(size_t queue_index) const {
+  DCHECK_LT(queue_index, queues_.size());
+  return queues_[queue_index];
+}
+
+scoped_refptr<base::SingleThreadTaskRunner>
+TaskQueueManager::TaskRunnerForQueue(size_t queue_index) {
+  return Queue(queue_index)->task_runner;
+}
+
+bool TaskQueueManager::PollQueue(size_t queue_index) {
cpu_(ooo_6.6-7.5)
2014/10/21 19:46:07
what is the use case for poll? wouldn't we want to just run the tasks instead?
Sami
2014/10/22 12:09:08
The use case is checking whether new high priority work has arrived without running it.

+  internal::TaskQueue* queue = Queue(queue_index);
+  if (!queue->work_queue.empty())
+    return true;
+  base::AutoLock lock(queue->incoming_queue_lock);
+  return !queue->incoming_queue.empty();
+}
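
A sketch of the polling use case from the thread above; the queue index and the continuation callback are hypothetical:

  // Inside a long-running, low-priority task: yield if higher-priority work
  // has arrived on another queue, and reschedule the remainder.
  if (manager->PollQueue(kHighPriorityQueueIndex)) {
    task_runner->PostTask(FROM_HERE, base::Bind(&ContinueLowPriorityWork));
    return;
  }
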
+
+bool TaskQueueManager::ReloadWorkQueue(size_t queue_index) {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  internal::TaskQueue* queue = Queue(queue_index);
+  DCHECK(queue->work_queue.empty());
+  base::AutoLock lock(queue->incoming_queue_lock);
+  if (!queue->auto_pump)
+    return false;
+  queue->work_queue.Swap(&queue->incoming_queue);
+  return !queue->work_queue.empty();
+}
+
+void TaskQueueManager::EnqueueTask(size_t queue_index,
+                                   const base::PendingTask& pending_task) {
+  internal::TaskQueue* queue = Queue(queue_index);
+  base::AutoLock lock(queue->incoming_queue_lock);
+  if (queue->auto_pump && queue->incoming_queue.empty())
+    PostDoWorkOnMainRunner();
jar (doing other things)
2014/10/28 22:33:41
In general, it is a bad idea to do much work while holding a lock.
Sami
2014/10/29 11:36:19
You're right, PostDoWorkOnMainRunner() doesn't need to be called while holding the lock.
Sami
2014/10/29 12:49:08
On further reflection, we do need to hold the lock while posting to keep the queue state and the scheduled wake-up consistent.
jar (doing other things)
2014/10/29 21:14:07
Can you point to how the lock acquisition precludes the race?
Sami
2014/10/30 10:43:00
The locking wasn't quite right in this version of the patch; it has been reworked in the latest one.

+  queue->incoming_queue.push(pending_task);
+}
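
For comparison, a sketch of the alternative discussed in the thread above, where the cross-thread post happens after the lock is released; note the patch deliberately keeps the post under the lock:

  void TaskQueueManager::EnqueueTask(size_t queue_index,
                                     const base::PendingTask& pending_task) {
    internal::TaskQueue* queue = Queue(queue_index);
    bool schedule_work = false;
    {
      base::AutoLock lock(queue->incoming_queue_lock);
      schedule_work = queue->auto_pump && queue->incoming_queue.empty();
      queue->incoming_queue.push(pending_task);
    }
    // The lock is never held across a call into another task runner.
    if (schedule_work)
      PostDoWorkOnMainRunner();
  }
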
+
+void TaskQueueManager::SetAutoPump(size_t queue_index, bool auto_pump) {
+  internal::TaskQueue* queue = Queue(queue_index);
+  base::AutoLock lock(queue->incoming_queue_lock);
+  if (auto_pump) {
+    queue->auto_pump = true;
+    PumpQueueLocked(queue);
+  } else {
+    queue->auto_pump = false;
+  }
+}
+
+void TaskQueueManager::PumpQueueLocked(internal::TaskQueue* queue) {
+  queue->incoming_queue_lock.AssertAcquired();
+  while (!queue->incoming_queue.empty()) {
+    queue->work_queue.push(queue->incoming_queue.front());
jar (doing other things)
2014/10/28 22:33:41
I'm not sure if I'm understanding this full context correctly.
Sami
2014/10/29 11:36:19
First, thanks for shedding some light on the reasoning here.
jar (doing other things)
2014/10/29 21:14:07
Acknowledged.

+    queue->incoming_queue.pop();
+  }
+  if (!queue->work_queue.empty())
+    PostDoWorkOnMainRunner();
+}
+
+void TaskQueueManager::PumpQueue(size_t queue_index) {
+  internal::TaskQueue* queue = Queue(queue_index);
+  base::AutoLock lock(queue->incoming_queue_lock);
+  PumpQueueLocked(queue);
+}
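
A sketch of how manual pumping is meant to be used once auto-pumping is off; the queue index is hypothetical:

  // Tasks posted after this accumulate on the incoming queue but do not run.
  manager->SetAutoPump(kIdleQueueIndex, false);

  // Later: move everything accumulated so far onto the work queue and
  // schedule DoWork to run it.
  manager->PumpQueue(kIdleQueueIndex);
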
+
+bool TaskQueueManager::UpdateWorkQueues() {
+  // TODO(skyostil): This is not efficient when the number of queues grows very
+  // large due to the number of locks taken. Consider optimizing when we get
+  // there.
+  bool has_work = false;
+  for (size_t i = 0; i < queues_.size(); i++) {
+    if (!queues_[i]->work_queue.empty())
+      has_work = true;
+    else if (ReloadWorkQueue(i))
+      has_work = true;
+  }
+  return has_work;
+}
+
+void TaskQueueManager::PostDoWorkOnMainRunner() {
+  main_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr()));
+}
+
+void TaskQueueManager::DoWork() {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  if (!UpdateWorkQueues())
+    return;
+
+  size_t queue_index;
+  if (!selector_->SelectWorkQueueToService(&queue_index))
+    return;
+  PostDoWorkOnMainRunner();
+  RunTaskFromWorkQueue(queue_index);
+}
+
+void TaskQueueManager::RunTaskFromWorkQueue(size_t queue_index) {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  internal::TaskQueue* queue = Queue(queue_index);
+  DCHECK(!queue->work_queue.empty());
+  base::PendingTask pending_task = queue->work_queue.front();
+  queue->work_queue.pop();
+  task_annotator_.RunTask(
+      "TaskQueueManager::PostTask", "TaskQueueManager::RunTask", pending_task);
+}
+
+bool TaskQueueManager::RunsTasksOnCurrentThread() const {
+  return main_task_runner_->RunsTasksOnCurrentThread();
+}
+
+bool TaskQueueManager::PostDelayedTask(
+    size_t queue_index,
+    const tracked_objects::Location& from_here,
+    const base::Closure& task,
+    base::TimeDelta delay) {
+  int sequence_num = task_sequence_num_.GetNext();
+
+  base::PendingTask pending_task(from_here, task);
+  pending_task.sequence_num = sequence_num;
+
+  task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task);
+  if (delay > base::TimeDelta()) {
cpu_(ooo_6.6-7.5)
2014/10/21 19:46:07
why is the logic here different from the logic in PostNonNestableDelayedTask?
Sami
2014/10/22 12:09:07
We want to defer any non-nestable work directly to the main task runner.

+    return main_task_runner_->PostDelayedTask(
+        from_here,
+        base::Bind(&TaskQueueManager::EnqueueTask,
+                   weak_factory_.GetWeakPtr(),
+                   queue_index,
+                   pending_task),
+        delay);
+  }
+  EnqueueTask(queue_index, pending_task);
+  return true;
+}
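
A sketch of the delayed path above: the task round-trips through the main task runner's timer and only enters the prioritized queue once the delay expires. The concrete timings are illustrative:

  // t=0ms: goes through main_task_runner_->PostDelayedTask() with the bound
  // EnqueueTask closure as the payload.
  runner->PostDelayedTask(FROM_HERE, task,
                          base::TimeDelta::FromMilliseconds(100));
  // t=100ms: the message loop runs EnqueueTask(queue_index, pending_task),
  // which pushes onto the incoming queue and schedules DoWork as usual.
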
+
+bool TaskQueueManager::PostNonNestableDelayedTask(
+    size_t queue_index,
+    const tracked_objects::Location& from_here,
+    const base::Closure& task,
+    base::TimeDelta delay) {
+  // Defer non-nestable work to the main task runner.
+  return main_task_runner_->PostNonNestableDelayedTask(from_here, task, delay);
+}
+
+}  // namespace content