Chromium Code Reviews

Unified Diff: content/renderer/raster_worker_pool.cc

Issue 1489233003: TaskGraphRunner Group support (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@refactor
Patch Set: Created 5 years ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/renderer/raster_worker_pool.h"

+#include <utility>
+
 #include "base/strings/stringprintf.h"
 #include "base/threading/thread_restrictions.h"
 #include "base/trace_event/trace_event.h"

 namespace content {

 // A sequenced task runner which posts tasks to a RasterWorkerPool.
 class RasterWorkerPool::RasterWorkerPoolSequencedTaskRunner
     : public base::SequencedTaskRunner {
  public:
(...skipping 23 matching lines...)

     tasks_.erase(tasks_.begin(), tasks_.begin() + completed_tasks_.size());

     tasks_.push_back(make_scoped_refptr(new ClosureTask(task)));
     graph_.Reset();
     for (const auto& graph_task : tasks_) {
       int dependencies = 0;
       if (!graph_.nodes.empty())
         dependencies = 1;

-      cc::TaskGraph::Node node(graph_task.get(), 0, dependencies);
+      cc::TaskGraph::Node node(graph_task.get(), 0u /* category */,
+                               0u /* priority */, dependencies);
       if (dependencies) {
         graph_.edges.push_back(
             cc::TaskGraph::Edge(graph_.nodes.back().task, node.task));
       }
       graph_.nodes.push_back(node);
     }
     task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
     completed_tasks_.clear();
     return true;
   }
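
As this hunk shows, cc::TaskGraph::Node now takes a category and a priority ahead of the dependency count. A minimal sketch of building a two-task chain with the new signature, mirroring the loop above (the header path and the helper function are assumptions for illustration, not part of this patch):

// Illustrative sketch: |second| depends on |first|, both in category 0 with
// default priority, just like the chain built in PostDelayedTask above.
#include "cc/raster/task_graph_runner.h"  // Assumed location of cc::TaskGraph.

void BuildTwoTaskChain(cc::TaskGraph* graph, cc::Task* first, cc::Task* second) {
  graph->nodes.push_back(cc::TaskGraph::Node(
      first, 0u /* category */, 0u /* priority */, 0u /* dependencies */));
  graph->nodes.push_back(cc::TaskGraph::Node(
      second, 0u /* category */, 0u /* priority */, 1u /* dependencies */));
  graph->edges.push_back(cc::TaskGraph::Edge(first, second));
}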
(...skipping 80 matching lines...)
       tasks_.begin(), tasks_.end(), [this](const scoped_refptr<cc::Task>& e) {
         return std::find(this->completed_tasks_.begin(),
                          this->completed_tasks_.end(),
                          e) != this->completed_tasks_.end();
       });
   tasks_.erase(end, tasks_.end());

   tasks_.push_back(make_scoped_refptr(new ClosureTask(task)));
   graph_.Reset();
   for (const auto& graph_task : tasks_)
-    graph_.nodes.push_back(cc::TaskGraph::Node(graph_task.get(), 0, 0));
+    graph_.nodes.push_back(
+        cc::TaskGraph::Node(graph_task.get(), 0u /* category */,
+                            0u /* priority */, 0u /* dependencies */));

   ScheduleTasksWithLockAcquired(namespace_token_, &graph_);
   completed_tasks_.clear();
   return true;
 }

 bool RasterWorkerPool::RunsTasksOnCurrentThread() const {
   return true;
 }

 // Overridden from base::DelegateSimpleThread::Delegate:
 void RasterWorkerPool::Run() {
   base::AutoLock lock(lock_);

   while (true) {
-    if (!work_queue_.HasReadyToRunTasks()) {
+    if (!RunTaskWithLockAcquired()) {
       // Exit when shutdown is set and no more tasks are pending.
       if (shutdown_)
         break;

       // Wait for more tasks.
       has_ready_to_run_tasks_cv_.Wait();
       continue;
     }
-
-    RunTaskWithLockAcquired();
   }
 }

 void RasterWorkerPool::FlushForTesting() {
   base::AutoLock lock(lock_);

   while (!work_queue_.HasFinishedRunningTasksInAllNamespaces()) {
     has_namespaces_with_finished_running_tasks_cv_.Wait();
   }
 }
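
The reworked Run() loop above now keys off the return value of RunTaskWithLockAcquired(): a worker keeps running tasks while any are ready, and only when nothing could be run does it either exit (if shutdown_ is set) or block on the condition variable. A simplified standalone sketch of the same pattern, using std:: primitives rather than Chromium's base:: types:

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>

// Simplified worker loop mirroring the structure of RasterWorkerPool::Run():
// run tasks while any are ready; when none ran, exit on shutdown or wait.
// Illustrative only, not the Chromium implementation.
class SimpleWorker {
 public:
  void Run() {
    std::unique_lock<std::mutex> lock(mutex_);
    while (true) {
      if (!RunOneTaskLocked(lock)) {
        if (shutdown_)
          break;                  // Nothing left to run and shutting down.
        has_work_cv_.wait(lock);  // Sleep until more tasks are posted.
      }
    }
  }

 private:
  // Runs one queued task with the lock temporarily released; returns false
  // when nothing was ready to run.
  bool RunOneTaskLocked(std::unique_lock<std::mutex>& lock) {
    if (tasks_.empty())
      return false;
    std::function<void()> task = std::move(tasks_.front());
    tasks_.pop_front();
    lock.unlock();
    task();
    lock.lock();
    return true;
  }

  std::mutex mutex_;
  std::condition_variable has_work_cv_;
  std::deque<std::function<void()>> tasks_;
  bool shutdown_ = false;
};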
(...skipping 63 matching lines...)
   }
 }

 void RasterWorkerPool::CollectCompletedTasksWithLockAcquired(
     cc::NamespaceToken token,
     cc::Task::Vector* completed_tasks) {
   DCHECK(token.IsValid());
   work_queue_.CollectCompletedTasks(token, completed_tasks);
 }

-void RasterWorkerPool::RunTaskWithLockAcquired() {
+bool RasterWorkerPool::RunTaskWithLockAcquired() {
   TRACE_EVENT0("toplevel", "TaskGraphRunner::RunTask");

   lock_.AssertAcquired();

-  auto prioritized_task = work_queue_.GetNextTaskToRun();
+  // Find the first category with any tasks to run. This task graph runner
+  // treats categories as an additional priority.
+  // TODO(ericrk): Add more category/thread logic.
+  const auto& ready_to_run_namespaces = work_queue_.ready_to_run_namespaces();
+  auto found = std::find_if(
+      ready_to_run_namespaces.cbegin(), ready_to_run_namespaces.cend(),
+      [](const std::pair<uint16_t,
+                         cc::TaskGraphWorkQueue::TaskNamespace::Vector>& pair) {
+        return !pair.second.empty();
+      });
+
+  if (found == ready_to_run_namespaces.cend()) {
+    return false;
+  }
+
+  const uint16_t category = found->first;
+  auto prioritized_task = work_queue_.GetNextTaskToRun(category);
   cc::Task* task = prioritized_task.task;

   // There may be more work available, so wake up another worker thread.
   if (work_queue_.HasReadyToRunTasks())
     has_ready_to_run_tasks_cv_.Signal();

   // Call WillRun() before releasing |lock_| and running task.
   task->WillRun();

   {
     base::AutoUnlock unlock(lock_);

     task->RunOnWorkerThread();
   }

   // This will mark task as finished running.
   task->DidRun();

   work_queue_.CompleteTask(prioritized_task);

   // If namespace has finished running all tasks, wake up origin threads.
   if (work_queue_.HasFinishedRunningTasksInNamespace(
           prioritized_task.task_namespace))
     has_namespaces_with_finished_running_tasks_cv_.Broadcast();
+
+  return true;
 }

 RasterWorkerPool::ClosureTask::ClosureTask(const base::Closure& closure)
     : closure_(closure) {}

 // Overridden from cc::Task:
 void RasterWorkerPool::ClosureTask::RunOnWorkerThread() {
   closure_.Run();
   closure_.Reset();
 }

 RasterWorkerPool::ClosureTask::~ClosureTask() {}

 }  // namespace content
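
The new body of RunTaskWithLockAcquired() treats the category index as an additional priority level: it scans the ready-to-run namespaces, keyed by category, and takes work from the first category that has any. A standalone sketch of that selection step, using stand-in types instead of the cc:: ones (the map keyed by category is an assumption of this sketch):

#include <algorithm>
#include <cstdint>
#include <map>
#include <vector>

// Stand-in for cc::TaskGraphWorkQueue::TaskNamespace in this sketch.
struct FakeTaskNamespace {};

// Returns true and sets |category| to the lowest-numbered category that has
// any ready-to-run namespaces, mirroring the std::find_if in the patch above.
bool FindFirstReadyCategory(
    const std::map<uint16_t, std::vector<FakeTaskNamespace*>>& ready_to_run,
    uint16_t* category) {
  auto found = std::find_if(
      ready_to_run.cbegin(), ready_to_run.cend(),
      [](const std::pair<const uint16_t, std::vector<FakeTaskNamespace*>>&
             pair) { return !pair.second.empty(); });
  if (found == ready_to_run.cend())
    return false;
  *category = found->first;
  return true;
}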
