Chromium Code Reviews

Side by Side Diff: content/renderer/raster_worker_pool.cc

Issue 1576133002: Add category handling to RasterWorkerPool (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@hihi
Patch Set: nit Created 4 years, 11 months ago
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/renderer/raster_worker_pool.h" 5 #include "content/renderer/raster_worker_pool.h"
6 6
7 #include <stddef.h> 7 #include <string>
8 #include <stdint.h>
9
10 #include <utility> 8 #include <utility>
9 #include <vector>
11 10
12 #include "base/strings/stringprintf.h" 11 #include "base/strings/stringprintf.h"
13 #include "base/threading/thread_restrictions.h" 12 #include "base/threading/thread_restrictions.h"
14 #include "base/trace_event/trace_event.h" 13 #include "base/trace_event/trace_event.h"
14 #include "cc/base/math_util.h"
15 #include "cc/raster/task_category.h"
15 16
16 namespace content { 17 namespace content {
18 namespace {
19
20 // A thread which forwards to RasterWorkerPool::Run with the runnable
21 // categories.
22 class RasterWorkerPoolThread : public base::SimpleThread {
23 public:
24 explicit RasterWorkerPoolThread(const std::string& name_prefix,
25 const Options& options,
26 RasterWorkerPool* pool,
27 std::vector<cc::TaskCategory> categories)
28 : SimpleThread(name_prefix, options),
29 pool_(pool),
30 categories_(categories) {}
31
32 void Run() override { pool_->Run(categories_); }
33
34 private:
35 RasterWorkerPool* const pool_;
36 const std::vector<cc::TaskCategory> categories_;
37 };
38
39 } // namespace
17 40
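
For illustration only (not part of the CL): a minimal standalone sketch of the forwarding pattern used by RasterWorkerPoolThread above, where each worker thread owns a fixed list of runnable categories and simply hands it to the pool's run loop. std::thread stands in for base::SimpleThread, and CategoryPool/TaskCategory below are hypothetical stand-ins for the Chromium types.

    // Sketch only: a worker thread that forwards its category list to a pool.
    #include <thread>
    #include <utility>
    #include <vector>

    enum TaskCategory { CATEGORY_NONCONCURRENT_FOREGROUND, CATEGORY_FOREGROUND, CATEGORY_BACKGROUND };

    class CategoryPool {
     public:
      // Blocks until shutdown, running only tasks from |categories| (elided).
      void Run(const std::vector<TaskCategory>& categories) { (void)categories; }
    };

    class CategoryWorkerThread {
     public:
      CategoryWorkerThread(CategoryPool* pool, std::vector<TaskCategory> categories)
          : pool_(pool), categories_(std::move(categories)) {}

      // The thread body does nothing but forward to the pool's run loop.
      void Start() { thread_ = std::thread([this] { pool_->Run(categories_); }); }
      void Join() { thread_.join(); }

     private:
      CategoryPool* const pool_;
      const std::vector<TaskCategory> categories_;
      std::thread thread_;
    };
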
18 // A sequenced task runner which posts tasks to a RasterWorkerPool. 41 // A sequenced task runner which posts tasks to a RasterWorkerPool.
19 class RasterWorkerPool::RasterWorkerPoolSequencedTaskRunner 42 class RasterWorkerPool::RasterWorkerPoolSequencedTaskRunner
20 : public base::SequencedTaskRunner { 43 : public base::SequencedTaskRunner {
21 public: 44 public:
22 explicit RasterWorkerPoolSequencedTaskRunner( 45 explicit RasterWorkerPoolSequencedTaskRunner(
23 cc::TaskGraphRunner* task_graph_runner) 46 cc::TaskGraphRunner* task_graph_runner)
24 : task_graph_runner_(task_graph_runner), 47 : task_graph_runner_(task_graph_runner),
25 namespace_token_(task_graph_runner->GetNamespaceToken()) {} 48 namespace_token_(task_graph_runner->GetNamespaceToken()) {}
26 49
(...skipping 18 matching lines...)
45 68
46 tasks_.erase(tasks_.begin(), tasks_.begin() + completed_tasks_.size()); 69 tasks_.erase(tasks_.begin(), tasks_.begin() + completed_tasks_.size());
47 70
48 tasks_.push_back(make_scoped_refptr(new ClosureTask(task))); 71 tasks_.push_back(make_scoped_refptr(new ClosureTask(task)));
49 graph_.Reset(); 72 graph_.Reset();
50 for (const auto& graph_task : tasks_) { 73 for (const auto& graph_task : tasks_) {
51 int dependencies = 0; 74 int dependencies = 0;
52 if (!graph_.nodes.empty()) 75 if (!graph_.nodes.empty())
53 dependencies = 1; 76 dependencies = 1;
54 77
55 cc::TaskGraph::Node node(graph_task.get(), 0u /* category */, 78 // Treat any tasks that are enqueued through the SequencedTaskRunner as
79 // FOREGROUND priority. We don't have enough information to know the
80 // actual priority of such tasks, so we run them as soon as possible.
81 cc::TaskGraph::Node node(graph_task.get(), cc::TASK_CATEGORY_FOREGROUND,
56 0u /* priority */, dependencies); 82 0u /* priority */, dependencies);
57 if (dependencies) { 83 if (dependencies) {
58 graph_.edges.push_back( 84 graph_.edges.push_back(
59 cc::TaskGraph::Edge(graph_.nodes.back().task, node.task)); 85 cc::TaskGraph::Edge(graph_.nodes.back().task, node.task));
60 } 86 }
61 graph_.nodes.push_back(node); 87 graph_.nodes.push_back(node);
62 } 88 }
63 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); 89 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);
64 completed_tasks_.clear(); 90 completed_tasks_.clear();
65 return true; 91 return true;
(...skipping 26 matching lines...)
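
The loop above builds a linear chain: every task enqueued through the sequenced task runner depends on the task enqueued just before it, which is how sequencing is enforced on top of the task graph, and each node is now tagged with the FOREGROUND category. A simplified sketch of that chain construction, with plain hypothetical structs standing in for cc::TaskGraph:

    // Sketch only: build a graph where node i depends on node i-1.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Node { int task_id; uint16_t category; uint16_t priority; size_t dependencies; };
    struct Edge { int from_task_id; int to_task_id; };
    struct Graph { std::vector<Node> nodes; std::vector<Edge> edges; };

    Graph BuildSequentialGraph(const std::vector<int>& task_ids, uint16_t category) {
      Graph graph;
      for (int id : task_ids) {
        // Every task except the first waits on exactly one dependency: the
        // previously appended task.
        size_t dependencies = graph.nodes.empty() ? 0u : 1u;
        Node node{id, category, /* priority */ 0u, dependencies};
        if (dependencies)
          graph.edges.push_back({graph.nodes.back().task_id, node.task_id});
        graph.nodes.push_back(node);
      }
      return graph;
    }
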
92 : namespace_token_(GetNamespaceToken()), 118 : namespace_token_(GetNamespaceToken()),
93 has_ready_to_run_tasks_cv_(&lock_), 119 has_ready_to_run_tasks_cv_(&lock_),
94 has_namespaces_with_finished_running_tasks_cv_(&lock_), 120 has_namespaces_with_finished_running_tasks_cv_(&lock_),
95 shutdown_(false) {} 121 shutdown_(false) {}
96 122
97 void RasterWorkerPool::Start( 123 void RasterWorkerPool::Start(
98 int num_threads, 124 int num_threads,
99 const base::SimpleThread::Options& thread_options) { 125 const base::SimpleThread::Options& thread_options) {
100 DCHECK(threads_.empty()); 126 DCHECK(threads_.empty());
101 while (threads_.size() < static_cast<size_t>(num_threads)) { 127 while (threads_.size() < static_cast<size_t>(num_threads)) {
102 scoped_ptr<base::DelegateSimpleThread> thread( 128 // Determine the categories that each thread can run.
103 new base::DelegateSimpleThread( 129 std::vector<cc::TaskCategory> task_categories;
104 this, base::StringPrintf("CompositorTileWorker%u", 130
105 static_cast<unsigned>(threads_.size() + 1)) 131 // The first thread can run nonconcurrent tasks.
106 .c_str(), 132 if (threads_.size() == 0) {
107 thread_options)); 133 task_categories.push_back(cc::TASK_CATEGORY_NONCONCURRENT_FOREGROUND);
134 }
135
136 // All threads can run foreground tasks.
137 task_categories.push_back(cc::TASK_CATEGORY_FOREGROUND);
138
139 // The last thread can run background tasks.
140 if (threads_.size() == (static_cast<size_t>(num_threads) - 1)) {
141 task_categories.push_back(cc::TASK_CATEGORY_BACKGROUND);
142 }
143
144 scoped_ptr<base::SimpleThread> thread(new RasterWorkerPoolThread(
145 base::StringPrintf("CompositorTileWorker%u",
146 static_cast<unsigned>(threads_.size() + 1))
147 .c_str(),
148 thread_options, this, task_categories));
108 thread->Start(); 149 thread->Start();
109 threads_.push_back(std::move(thread)); 150 threads_.push_back(std::move(thread));
110 } 151 }
111 } 152 }
112 153
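
The category assignment in Start() can be read as a small pure function of the thread index: thread 0 additionally handles nonconcurrent foreground work, every thread handles foreground work, and the last thread additionally handles background work. A standalone sketch of that rule; the enum and helper name are illustrative, not Chromium API:

    // Sketch only: which categories worker |index| of |num_threads| may run.
    #include <cstddef>
    #include <vector>

    enum TaskCategory { CATEGORY_NONCONCURRENT_FOREGROUND, CATEGORY_FOREGROUND, CATEGORY_BACKGROUND };

    std::vector<TaskCategory> CategoriesForThread(size_t index, size_t num_threads) {
      std::vector<TaskCategory> categories;
      // Only the first thread runs nonconcurrent tasks, so two nonconcurrent
      // tasks can never execute in parallel.
      if (index == 0)
        categories.push_back(CATEGORY_NONCONCURRENT_FOREGROUND);
      // Every thread runs ordinary foreground work.
      categories.push_back(CATEGORY_FOREGROUND);
      // Only the last thread picks up background work, so at most one thread
      // is ever occupied by low-priority tasks.
      if (index + 1 == num_threads)
        categories.push_back(CATEGORY_BACKGROUND);
      return categories;
    }

With four threads this yields {NONCONCURRENT_FOREGROUND, FOREGROUND}, {FOREGROUND}, {FOREGROUND}, {FOREGROUND, BACKGROUND}; with a single thread, that one thread receives all three categories.
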
113 void RasterWorkerPool::Shutdown() { 154 void RasterWorkerPool::Shutdown() {
114 WaitForTasksToFinishRunning(namespace_token_); 155 WaitForTasksToFinishRunning(namespace_token_);
115 CollectCompletedTasks(namespace_token_, &completed_tasks_); 156 CollectCompletedTasks(namespace_token_, &completed_tasks_);
116 // Shutdown raster threads. 157 // Shutdown raster threads.
117 { 158 {
(...skipping 28 matching lines...)
146 cc::Task::Vector::iterator end = std::remove_if( 187 cc::Task::Vector::iterator end = std::remove_if(
147 tasks_.begin(), tasks_.end(), [this](const scoped_refptr<cc::Task>& e) { 188 tasks_.begin(), tasks_.end(), [this](const scoped_refptr<cc::Task>& e) {
148 return std::find(this->completed_tasks_.begin(), 189 return std::find(this->completed_tasks_.begin(),
149 this->completed_tasks_.end(), 190 this->completed_tasks_.end(),
150 e) != this->completed_tasks_.end(); 191 e) != this->completed_tasks_.end();
151 }); 192 });
152 tasks_.erase(end, tasks_.end()); 193 tasks_.erase(end, tasks_.end());
153 194
154 tasks_.push_back(make_scoped_refptr(new ClosureTask(task))); 195 tasks_.push_back(make_scoped_refptr(new ClosureTask(task)));
155 graph_.Reset(); 196 graph_.Reset();
156 for (const auto& graph_task : tasks_) 197 for (const auto& graph_task : tasks_) {
198 // Delayed tasks are assigned FOREGROUND category, ensuring that they run as
199 // soon as possible once their delay has expired.
157 graph_.nodes.push_back( 200 graph_.nodes.push_back(
158 cc::TaskGraph::Node(graph_task.get(), 0u /* category */, 201 cc::TaskGraph::Node(graph_task.get(), cc::TASK_CATEGORY_FOREGROUND,
159 0u /* priority */, 0u /* dependencies */)); 202 0u /* priority */, 0u /* dependencies */));
203 }
160 204
161 ScheduleTasksWithLockAcquired(namespace_token_, &graph_); 205 ScheduleTasksWithLockAcquired(namespace_token_, &graph_);
162 completed_tasks_.clear(); 206 completed_tasks_.clear();
163 return true; 207 return true;
164 } 208 }
165 209
166 bool RasterWorkerPool::RunsTasksOnCurrentThread() const { 210 bool RasterWorkerPool::RunsTasksOnCurrentThread() const {
167 return true; 211 return true;
168 } 212 }
169 213
170 // Overridden from base::DelegateSimpleThread::Delegate: 214 void RasterWorkerPool::Run(const std::vector<cc::TaskCategory>& categories) {
171 void RasterWorkerPool::Run() {
172 base::AutoLock lock(lock_); 215 base::AutoLock lock(lock_);
173 216
174 while (true) { 217 while (true) {
175 if (!RunTaskWithLockAcquired()) { 218 if (!RunTaskWithLockAcquired(categories)) {
176 // Exit when shutdown is set and no more tasks are pending. 219 // Exit when shutdown is set and no more tasks are pending.
177 if (shutdown_) 220 if (shutdown_)
178 break; 221 break;
179 222
180 // Wait for more tasks. 223 // Wait for more tasks.
181 has_ready_to_run_tasks_cv_.Wait(); 224 has_ready_to_run_tasks_cv_.Wait();
182 continue; 225 continue;
183 } 226 }
184 } 227 }
185 } 228 }
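
The reworked Run() is the classic condition-variable worker loop: try to run one task for this thread's categories; if none is runnable, exit when the pool is shutting down, otherwise sleep until the scheduler signals new work. A self-contained sketch of that shape, using std::mutex/std::condition_variable in place of base::Lock/base::ConditionVariable; RunOneTaskLocked is a hypothetical placeholder for the category-aware dequeue.

    // Sketch only: the worker loop and its shutdown path.
    #include <condition_variable>
    #include <mutex>
    #include <vector>

    enum TaskCategory { CATEGORY_NONCONCURRENT_FOREGROUND, CATEGORY_FOREGROUND, CATEGORY_BACKGROUND };

    class WorkerLoop {
     public:
      void Run(const std::vector<TaskCategory>& categories) {
        std::unique_lock<std::mutex> lock(mutex_);
        while (true) {
          if (!RunOneTaskLocked(categories)) {
            if (shutdown_)
              break;                         // Nothing left to do and shutting down.
            has_ready_tasks_cv_.wait(lock);  // Sleep until new work is scheduled.
            continue;
          }
        }
      }

      void Shutdown() {
        std::lock_guard<std::mutex> guard(mutex_);
        shutdown_ = true;
        has_ready_tasks_cv_.notify_all();  // Wake every worker so it can exit.
      }

     private:
      // Dequeues and runs at most one ready task matching |categories|;
      // returns false when nothing is runnable (queue plumbing elided).
      bool RunOneTaskLocked(const std::vector<TaskCategory>& categories) {
        (void)categories;
        return false;
      }

      std::mutex mutex_;
      std::condition_variable has_ready_tasks_cv_;
      bool shutdown_ = false;
    };
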
(...skipping 29 matching lines...)
215 } 258 }
216 259
217 void RasterWorkerPool::ScheduleTasksWithLockAcquired(cc::NamespaceToken token, 260 void RasterWorkerPool::ScheduleTasksWithLockAcquired(cc::NamespaceToken token,
218 cc::TaskGraph* graph) { 261 cc::TaskGraph* graph) {
219 DCHECK(token.IsValid()); 262 DCHECK(token.IsValid());
220 DCHECK(!cc::TaskGraphWorkQueue::DependencyMismatch(graph)); 263 DCHECK(!cc::TaskGraphWorkQueue::DependencyMismatch(graph));
221 DCHECK(!shutdown_); 264 DCHECK(!shutdown_);
222 265
223 work_queue_.ScheduleTasks(token, graph); 266 work_queue_.ScheduleTasks(token, graph);
224 267
225 // If there is more work available, wake up worker thread. 268 // If there is more work available, wake up the other worker threads.
226 if (work_queue_.HasReadyToRunTasks()) 269 if (work_queue_.HasReadyToRunTasks())
227 has_ready_to_run_tasks_cv_.Signal(); 270 has_ready_to_run_tasks_cv_.Broadcast();
228 } 271 }
229 272
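
The switch from Signal() to Broadcast() here is presumably needed because the workers are now heterogeneous: a single wake-up could land on a thread whose category list does not include the newly ready work, so all workers are woken and ineligible ones simply go back to sleep. A minimal sketch of the scheduling side, with notify_all() as the standard-library analogue of Broadcast(); names are illustrative.

    // Sketch only: waking workers after new tasks become ready.
    #include <condition_variable>

    class Scheduler {
     public:
      // Caller holds the pool lock (mirrors the *WithLockAcquired convention).
      void ScheduleTasksLocked() {
        // ...enqueue new tasks into per-category ready queues (elided)...
        if (has_ready_to_run_tasks_)
          has_ready_tasks_cv_.notify_all();  // Broadcast, not Signal: any one of
                                             // the heterogeneous workers may be
                                             // the only one able to run this work.
      }

     private:
      std::condition_variable has_ready_tasks_cv_;
      bool has_ready_to_run_tasks_ = false;
    };
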
230 void RasterWorkerPool::WaitForTasksToFinishRunning(cc::NamespaceToken token) { 273 void RasterWorkerPool::WaitForTasksToFinishRunning(cc::NamespaceToken token) {
231 TRACE_EVENT0("cc", "RasterWorkerPool::WaitForTasksToFinishRunning"); 274 TRACE_EVENT0("cc", "RasterWorkerPool::WaitForTasksToFinishRunning");
232 275
233 DCHECK(token.IsValid()); 276 DCHECK(token.IsValid());
234 277
235 { 278 {
236 base::AutoLock lock(lock_); 279 base::AutoLock lock(lock_);
237 base::ThreadRestrictions::ScopedAllowWait allow_wait; 280 base::ThreadRestrictions::ScopedAllowWait allow_wait;
(...skipping 19 matching lines...)
257 } 300 }
258 } 301 }
259 302
260 void RasterWorkerPool::CollectCompletedTasksWithLockAcquired( 303 void RasterWorkerPool::CollectCompletedTasksWithLockAcquired(
261 cc::NamespaceToken token, 304 cc::NamespaceToken token,
262 cc::Task::Vector* completed_tasks) { 305 cc::Task::Vector* completed_tasks) {
263 DCHECK(token.IsValid()); 306 DCHECK(token.IsValid());
264 work_queue_.CollectCompletedTasks(token, completed_tasks); 307 work_queue_.CollectCompletedTasks(token, completed_tasks);
265 } 308 }
266 309
267 bool RasterWorkerPool::RunTaskWithLockAcquired() { 310 bool RasterWorkerPool::RunTaskWithLockAcquired(
311 const std::vector<cc::TaskCategory>& categories) {
312 for (const auto& category : categories) {
313 if (work_queue_.HasReadyToRunTasksForCategory(category)) {
314 RunTaskInCategoryWithLockAcquired(category);
315 return true;
316 }
317 }
318 return false;
319 }
320
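
RunTaskWithLockAcquired treats the category list as an ordered preference: the categories arrive in the order they were pushed in Start(), so the first category with ready work wins before per-task priority is consulted. A hedged sketch of that selection, with a hypothetical per-category queue standing in for cc::TaskGraphWorkQueue:

    // Sketch only: run one task from the first listed category that has work.
    #include <deque>
    #include <map>
    #include <vector>

    enum TaskCategory { CATEGORY_NONCONCURRENT_FOREGROUND, CATEGORY_FOREGROUND, CATEGORY_BACKGROUND };
    using Task = int;  // Stand-in for a real task object.

    class CategoryQueues {
     public:
      // |categories| is ordered from most to least preferred for this thread.
      bool RunFirstReadyTask(const std::vector<TaskCategory>& categories) {
        for (TaskCategory category : categories) {
          std::deque<Task>& queue = ready_[category];
          if (!queue.empty()) {
            Task task = queue.front();
            queue.pop_front();
            RunTask(task);
            return true;  // Ran exactly one task.
          }
        }
        return false;  // Nothing runnable for this thread's categories.
      }

     private:
      void RunTask(Task task) {
        // The real code releases the pool lock around the task body; elided.
        (void)task;
      }

      std::map<TaskCategory, std::deque<Task>> ready_;
    };
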
321 void RasterWorkerPool::RunTaskInCategoryWithLockAcquired(
322 cc::TaskCategory category) {
268 TRACE_EVENT0("toplevel", "TaskGraphRunner::RunTask"); 323 TRACE_EVENT0("toplevel", "TaskGraphRunner::RunTask");
269 324
270 lock_.AssertAcquired(); 325 lock_.AssertAcquired();
271 326
272 // Find the first category with any tasks to run. This task graph runner
273 // treats categories as an additional priority.
274 // TODO(ericrk): Add more category/thread logic.
275 const auto& ready_to_run_namespaces = work_queue_.ready_to_run_namespaces();
276 auto found = std::find_if(
277 ready_to_run_namespaces.cbegin(), ready_to_run_namespaces.cend(),
278 [](const std::pair<uint16_t,
279 cc::TaskGraphWorkQueue::TaskNamespace::Vector>& pair) {
280 return !pair.second.empty();
281 });
282
283 if (found == ready_to_run_namespaces.cend()) {
284 return false;
285 }
286
287 const uint16_t category = found->first;
288 auto prioritized_task = work_queue_.GetNextTaskToRun(category); 327 auto prioritized_task = work_queue_.GetNextTaskToRun(category);
289 cc::Task* task = prioritized_task.task; 328 cc::Task* task = prioritized_task.task;
290 329
291 // There may be more work available, so wake up another worker thread.
292 if (work_queue_.HasReadyToRunTasks())
293 has_ready_to_run_tasks_cv_.Signal();
294
295 // Call WillRun() before releasing |lock_| and running task. 330 // Call WillRun() before releasing |lock_| and running task.
296 task->WillRun(); 331 task->WillRun();
297 332
298 { 333 {
299 base::AutoUnlock unlock(lock_); 334 base::AutoUnlock unlock(lock_);
300 335
301 task->RunOnWorkerThread(); 336 task->RunOnWorkerThread();
302 } 337 }
303 338
304 // This will mark task as finished running. 339 // This will mark task as finished running.
305 task->DidRun(); 340 task->DidRun();
306 341
307 work_queue_.CompleteTask(prioritized_task); 342 work_queue_.CompleteTask(prioritized_task);
308 343
344 // We may have just dequeued more tasks, wake up the other worker threads.
345 if (work_queue_.HasReadyToRunTasks())
346 has_ready_to_run_tasks_cv_.Broadcast();
347
309 // If namespace has finished running all tasks, wake up origin threads. 348 // If namespace has finished running all tasks, wake up origin threads.
310 if (work_queue_.HasFinishedRunningTasksInNamespace( 349 if (work_queue_.HasFinishedRunningTasksInNamespace(
311 prioritized_task.task_namespace)) 350 prioritized_task.task_namespace))
312 has_namespaces_with_finished_running_tasks_cv_.Broadcast(); 351 has_namespaces_with_finished_running_tasks_cv_.Broadcast();
313
314 return true;
315 } 352 }
316 353
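
RunTaskInCategoryWithLockAcquired keeps the bookkeeping (WillRun, DidRun, CompleteTask) under the pool lock but drops the lock around the potentially long RunOnWorkerThread call, then broadcasts because completing a task may have unblocked dependents that only some workers can run. A standalone sketch of that lock-release pattern; Task and the condition variable below are illustrative, not the Chromium types.

    // Sketch only: run a task with the pool lock released around the work.
    #include <condition_variable>
    #include <mutex>

    struct Task {
      void WillRun() {}            // Mark as started.
      void RunOnWorkerThread() {}  // The actual (potentially slow) work.
      void DidRun() {}             // Mark as finished.
    };

    class Pool {
     public:
      // |lock| holds the pool mutex on entry and on exit.
      void RunTaskLocked(std::unique_lock<std::mutex>& lock, Task* task) {
        task->WillRun();            // Still under the lock.
        lock.unlock();
        task->RunOnWorkerThread();  // Long-running work without the lock.
        lock.lock();
        task->DidRun();             // Bookkeeping under the lock again.
        // Completing a task can make dependent tasks ready; wake all workers,
        // since only some of them may be allowed to run the new category.
        has_ready_tasks_cv_.notify_all();
      }

     private:
      std::condition_variable has_ready_tasks_cv_;
    };
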
317 RasterWorkerPool::ClosureTask::ClosureTask(const base::Closure& closure) 354 RasterWorkerPool::ClosureTask::ClosureTask(const base::Closure& closure)
318 : closure_(closure) {} 355 : closure_(closure) {}
319 356
320 // Overridden from cc::Task: 357 // Overridden from cc::Task:
321 void RasterWorkerPool::ClosureTask::RunOnWorkerThread() { 358 void RasterWorkerPool::ClosureTask::RunOnWorkerThread() {
322 closure_.Run(); 359 closure_.Run();
323 closure_.Reset(); 360 closure_.Reset();
324 } 361 }
325 362
326 RasterWorkerPool::ClosureTask::~ClosureTask() {} 363 RasterWorkerPool::ClosureTask::~ClosureTask() {}
327 364
328 } // namespace content 365 } // namespace content