Chromium Code Reviews
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/scheduler/task_queue_manager.h"

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "content/renderer/scheduler/task_queue_selector.h"

namespace content {
namespace internal {

class TaskRunner : public base::SingleThreadTaskRunner {
 public:
  TaskRunner(base::WeakPtr<TaskQueueManager> task_queue_manager,
             size_t queue_index);

  // base::SingleThreadTaskRunner implementation.
  virtual bool RunsTasksOnCurrentThread() const override;
  virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
                               const base::Closure& task,
                               base::TimeDelta delay) override;
  virtual bool PostNonNestableDelayedTask(
      const tracked_objects::Location& from_here,
      const base::Closure& task,
      base::TimeDelta delay) override;

 private:
  virtual ~TaskRunner();

  base::WeakPtr<TaskQueueManager> task_queue_manager_;
cpu_(ooo_6.6-7.5), 2014/10/20 20:00:10:
  why is this weak?
Sami, 2014/10/21 10:32:54:
  It's needed to avoid posting new tasks after the s…
cpu_(ooo_6.6-7.5), 2014/10/21 19:46:07:
  Should the QM take a reference instead?
Sami, 2014/10/22 12:09:08:
  I'm not sure I got that -- QM already holds a refe…
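To make the lifetime issue in this thread concrete, here is a minimal sketch of what the WeakPtr guards against (the setup names main_runner and selector are hypothetical): callers hold scoped_refptr references to the TaskRunner, so it can outlive the manager, and a post after the manager's destruction must fail gracefully instead of touching freed memory.

    // Hypothetical setup; assumes main_runner and selector already exist.
    scoped_ptr<TaskQueueManager> manager(
        new TaskQueueManager(1, main_runner, selector));
    scoped_refptr<base::SingleThreadTaskRunner> runner =
        manager->TaskRunnerForQueue(0);

    manager.reset();  // Destroys the manager and invalidates its WeakPtrs.

    // Without the WeakPtr this would dereference a deleted TaskQueueManager;
    // with it, the post is rejected and the runner reports failure.
    bool posted = runner->PostTask(FROM_HERE, base::Bind(&base::DoNothing));
    DCHECK(!posted);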
  const size_t queue_index_;

  DISALLOW_COPY_AND_ASSIGN(TaskRunner);
};

TaskRunner::TaskRunner(base::WeakPtr<TaskQueueManager> task_queue_manager,
                       size_t queue_index)
    : task_queue_manager_(task_queue_manager), queue_index_(queue_index) {
}

TaskRunner::~TaskRunner() {
}

bool TaskRunner::RunsTasksOnCurrentThread() const {
  if (!task_queue_manager_)
    return false;
  return task_queue_manager_->RunsTasksOnCurrentThread();
}

bool TaskRunner::PostDelayedTask(const tracked_objects::Location& from_here,
                                 const base::Closure& task,
                                 base::TimeDelta delay) {
  if (!task_queue_manager_)
    return false;
  return task_queue_manager_->PostDelayedTask(
      queue_index_, from_here, task, delay);
}

bool TaskRunner::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    base::TimeDelta delay) {
  if (!task_queue_manager_)
    return false;
  return task_queue_manager_->PostNonNestableDelayedTask(
      queue_index_, from_here, task, delay);
}
cpu_(ooo_6.6-7.5), 2014/10/20 20:00:10:
  It would seem at first blush that the only value t…
Sami, 2014/10/21 10:32:54:
  The reason we went for the TaskRunner was that all…
cpu_(ooo_6.6-7.5), 2014/10/21 19:46:07:
  Acknowledged.
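A sketch of the rationale in Sami's (truncated) reply, as I read it: existing code is written against base::SingleThreadTaskRunner, so wrapping each queue in a TaskRunner lets consumers post to a specific queue without knowing the manager exists. The consumer function and setup below are hypothetical.

    // Hypothetical consumer that only understands SingleThreadTaskRunner.
    void InitCompositor(scoped_refptr<base::SingleThreadTaskRunner> runner);

    TaskQueueManager manager(2, main_runner, selector);
    // Queue 0 is handed out as an ordinary task runner; the consumer never
    // sees the queue/selector machinery behind it.
    InitCompositor(manager.TaskRunnerForQueue(0));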

class TaskQueue {
 public:
cpu_(ooo_6.6-7.5), 2014/10/21 19:46:07:
  this should be a struct.
Sami, 2014/10/22 12:09:07:
  I turned it into a class on Jochen's suggestion, b…
  TaskQueue() : auto_pump(true) {}
  ~TaskQueue() {}

  scoped_refptr<TaskRunner> task_runner;

  base::Lock incoming_queue_lock;
  base::TaskQueue incoming_queue;

  bool auto_pump;
  base::TaskQueue work_queue;

  DISALLOW_COPY_AND_ASSIGN(TaskQueue);
};

}  // namespace internal

TaskQueueManager::TaskQueueManager(
    size_t task_queue_count,
    scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
    TaskQueueSelector* selector)
    : main_task_runner_(main_task_runner),
      selector_(selector),
      weak_factory_(this) {
  DCHECK(main_task_runner->RunsTasksOnCurrentThread());

  for (size_t i = 0; i < task_queue_count; i++) {
    scoped_ptr<internal::TaskQueue> queue(new internal::TaskQueue());
    queue->task_runner = make_scoped_refptr(
        new internal::TaskRunner(weak_factory_.GetWeakPtr(), i));
    queues_.push_back(queue.release());
  }

  std::vector<const base::TaskQueue*> work_queues;
  for (size_t i = 0; i < queues_.size(); i++)
cpu_(ooo_6.6-7.5), 2014/10/21 19:46:07:
  use range for, that has been approved, right?
Sami, 2014/10/22 12:09:08:
  Ah, that's true. Thanks, done.
    work_queues.push_back(&queues_[i]->work_queue);
  selector_->RegisterWorkQueues(work_queues);
}
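For reference, the range-based loop agreed on in the thread above would read roughly as follows; this is a sketch of the follow-up change, not the code in this patch set.

    std::vector<const base::TaskQueue*> work_queues;
    for (const auto& queue : queues_)
      work_queues.push_back(&queue->work_queue);
    selector_->RegisterWorkQueues(work_queues);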

TaskQueueManager::~TaskQueueManager() {
}

internal::TaskQueue* TaskQueueManager::Queue(size_t queue_index) const {
  DCHECK_LT(queue_index, queues_.size());
  return queues_[queue_index];
}

scoped_refptr<base::SingleThreadTaskRunner>
TaskQueueManager::TaskRunnerForQueue(size_t queue_index) {
  return Queue(queue_index)->task_runner;
}

bool TaskQueueManager::PollQueue(size_t queue_index) {
cpu_(ooo_6.6-7.5), 2014/10/21 19:46:07:
  what is the use case for poll? wouldn't we want t…
Sami, 2014/10/22 12:09:08:
  The use case is checking whether new high priority…
  internal::TaskQueue* queue = Queue(queue_index);
  if (!queue->work_queue.empty())
    return true;
  base::AutoLock lock(queue->incoming_queue_lock);
  return !queue->incoming_queue.empty();
}
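A sketch of the polling use case from Sami's (truncated) answer: a long-running operation on the main thread can check whether higher-priority work has arrived and yield early. The queue index and the helper functions below are hypothetical.

    const size_t kHighPriorityQueue = 0;
    while (HaveMoreLowPriorityWork()) {
      if (manager->PollQueue(kHighPriorityQueue))
        break;  // Yield so DoWork() can service the high priority queue.
      DoSomeLowPriorityWork();
    }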

bool TaskQueueManager::ReloadWorkQueue(size_t queue_index) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  DCHECK(queue->work_queue.empty());
  base::AutoLock lock(queue->incoming_queue_lock);
  if (!queue->auto_pump)
    return false;
  queue->work_queue.Swap(&queue->incoming_queue);
  return !queue->work_queue.empty();
}

void TaskQueueManager::EnqueueTask(size_t queue_index,
                                   const base::PendingTask& pending_task) {
  internal::TaskQueue* queue = Queue(queue_index);
  base::AutoLock lock(queue->incoming_queue_lock);
  if (queue->auto_pump && queue->incoming_queue.empty())
    PostDoWorkOnMainRunner();
jar (doing other things), 2014/10/28 22:33:41:
  In general, it is a bad idea to do much work while…
Sami, 2014/10/29 11:36:19:
  You're right, PostDoWorkOnMainRunner() doesn't nee…
Sami, 2014/10/29 12:49:08:
  On further reflection, we do need to hold the lock…
jar (doing other things), 2014/10/29 21:14:07:
  Can you point to how the lock acquisition preclude…
Sami, 2014/10/30 10:43:00:
  The locking wasn't quite right in this version of…
  queue->incoming_queue.push(pending_task);
}
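For illustration, the shape jar is suggesting (keep the critical section minimal and do the cross-thread post outside it) would look roughly like this. Note Sami's later replies: this version of the patch turned out to need the post under the lock, so treat this as the general idiom under discussion, not the agreed fix.

    void TaskQueueManager::EnqueueTask(size_t queue_index,
                                       const base::PendingTask& pending_task) {
      internal::TaskQueue* queue = Queue(queue_index);
      bool schedule_work = false;
      {
        base::AutoLock lock(queue->incoming_queue_lock);
        schedule_work = queue->auto_pump && queue->incoming_queue.empty();
        queue->incoming_queue.push(pending_task);
      }
      // The cross-thread post happens after the lock is released.
      if (schedule_work)
        PostDoWorkOnMainRunner();
    }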

void TaskQueueManager::SetAutoPump(size_t queue_index, bool auto_pump) {
  internal::TaskQueue* queue = Queue(queue_index);
  base::AutoLock lock(queue->incoming_queue_lock);
  if (auto_pump) {
    queue->auto_pump = true;
    PumpQueueLocked(queue);
  } else {
    queue->auto_pump = false;
  }
}
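A short usage sketch of the manual pumping mode (manager, runner, and the queue index are placeholders): with auto-pump off, posted tasks accumulate in the incoming queue and only become runnable once the queue is explicitly pumped.

    manager->SetAutoPump(1, false);
    // Held back: lands in the incoming queue but is not scheduled yet.
    runner->PostTask(FROM_HERE, base::Bind(&base::DoNothing));
    // ...later: moves everything queued so far into the work queue and
    // schedules DoWork() if anything became runnable.
    manager->PumpQueue(1);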

void TaskQueueManager::PumpQueueLocked(internal::TaskQueue* queue) {
  queue->incoming_queue_lock.AssertAcquired();
  while (!queue->incoming_queue.empty()) {
    queue->work_queue.push(queue->incoming_queue.front());
jar (doing other things), 2014/10/28 22:33:41:
  I'm not sure if I'm understanding this full contex…
Sami, 2014/10/29 11:36:19:
  First, thanks for shedding some light on the reaso…
jar (doing other things), 2014/10/29 21:14:07:
  Acknowledged.
    queue->incoming_queue.pop();
  }
  if (!queue->work_queue.empty())
    PostDoWorkOnMainRunner();
}

void TaskQueueManager::PumpQueue(size_t queue_index) {
  internal::TaskQueue* queue = Queue(queue_index);
  base::AutoLock lock(queue->incoming_queue_lock);
  PumpQueueLocked(queue);
}

bool TaskQueueManager::UpdateWorkQueues() {
  // TODO(skyostil): This is not efficient when the number of queues grows very
  // large due to the number of locks taken. Consider optimizing when we get
  // there.
  bool has_work = false;
  for (size_t i = 0; i < queues_.size(); i++) {
    if (!queues_[i]->work_queue.empty())
      has_work = true;
    else if (ReloadWorkQueue(i))
      has_work = true;
  }
  return has_work;
}

void TaskQueueManager::PostDoWorkOnMainRunner() {
  main_task_runner_->PostTask(
      FROM_HERE, Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr()));
}

void TaskQueueManager::DoWork() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  if (!UpdateWorkQueues())
    return;

  size_t queue_index;
  if (!selector_->SelectWorkQueueToService(&queue_index))
    return;
  // Run one task per DoWork() invocation and repost, so other work on the
  // main runner can interleave with queue processing.
  PostDoWorkOnMainRunner();
  RunTaskFromWorkQueue(queue_index);
}

void TaskQueueManager::RunTaskFromWorkQueue(size_t queue_index) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  DCHECK(!queue->work_queue.empty());
  base::PendingTask pending_task = queue->work_queue.front();
  queue->work_queue.pop();
  task_annotator_.RunTask(
      "TaskQueueManager::PostTask", "TaskQueueManager::RunTask", pending_task);
}

bool TaskQueueManager::RunsTasksOnCurrentThread() const {
  return main_task_runner_->RunsTasksOnCurrentThread();
}

bool TaskQueueManager::PostDelayedTask(
    size_t queue_index,
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    base::TimeDelta delay) {
  int sequence_num = task_sequence_num_.GetNext();

  base::PendingTask pending_task(from_here, task);
  pending_task.sequence_num = sequence_num;

  task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task);
  if (delay > base::TimeDelta()) {
cpu_(ooo_6.6-7.5), 2014/10/21 19:46:07:
  why is the logic here different from the logic in…
Sami, 2014/10/22 12:09:07:
  We want to defer any non-nestable work directly to…
    return main_task_runner_->PostDelayedTask(
        from_here,
        Bind(&TaskQueueManager::EnqueueTask,
             weak_factory_.GetWeakPtr(),
             queue_index,
             pending_task),
        delay);
  }
  EnqueueTask(queue_index, pending_task);
  return true;
}

bool TaskQueueManager::PostNonNestableDelayedTask(
    size_t queue_index,
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    base::TimeDelta delay) {
  // Defer non-nestable work to the main task runner.
  return main_task_runner_->PostNonNestableDelayedTask(from_here, task, delay);
}
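To make the distinction from the thread above concrete, a small sketch (the manager and queue index are placeholders): nestable tasks go through the manager's queues and selector, while non-nestable tasks bypass them entirely and rely on the underlying message loop's non-nestable handling.

    scoped_refptr<base::SingleThreadTaskRunner> runner =
        manager->TaskRunnerForQueue(0);
    base::Closure task = base::Bind(&base::DoNothing);
    runner->PostTask(FROM_HERE, task);             // Enqueued on queue 0.
    runner->PostNonNestableTask(FROM_HERE, task);  // Straight to the main loop.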

}  // namespace content