OLD | NEW |
| (Empty) |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/renderer/raster_worker_pool.h" | |
6 | |
7 #include <string> | |
8 #include <utility> | |
9 #include <vector> | |
10 | |
11 #include "base/strings/stringprintf.h" | |
12 #include "base/threading/thread_restrictions.h" | |
13 #include "base/trace_event/trace_event.h" | |
14 #include "cc/base/math_util.h" | |
15 #include "cc/raster/task_category.h" | |
16 | |
17 namespace content { | |
18 namespace { | |
19 | |
// A thread which forwards to RasterWorkerPool::Run with the runnable
// categories. The thread body simply parks inside RasterWorkerPool::Run
// until the pool is shut down.
class RasterWorkerPoolThread : public base::SimpleThread {
 public:
  // |pool| and |has_ready_to_run_tasks_cv| are owned by the caller and must
  // outlive this thread. |categories| lists the task categories this thread
  // is allowed to run, in priority order.
  RasterWorkerPoolThread(const std::string& name_prefix,
                         const Options& options,
                         RasterWorkerPool* pool,
                         std::vector<cc::TaskCategory> categories,
                         base::ConditionVariable* has_ready_to_run_tasks_cv)
      : SimpleThread(name_prefix, options),
        pool_(pool),
        categories_(categories),
        has_ready_to_run_tasks_cv_(has_ready_to_run_tasks_cv) {}

  // Runs the pool's worker loop; does not return until shutdown.
  void Run() override { pool_->Run(categories_, has_ready_to_run_tasks_cv_); }

 private:
  RasterWorkerPool* const pool_;              // Not owned.
  const std::vector<cc::TaskCategory> categories_;
  base::ConditionVariable* const has_ready_to_run_tasks_cv_;  // Not owned.
};
41 | |
42 } // namespace | |
43 | |
44 // A sequenced task runner which posts tasks to a RasterWorkerPool. | |
45 class RasterWorkerPool::RasterWorkerPoolSequencedTaskRunner | |
46 : public base::SequencedTaskRunner { | |
47 public: | |
48 explicit RasterWorkerPoolSequencedTaskRunner( | |
49 cc::TaskGraphRunner* task_graph_runner) | |
50 : task_graph_runner_(task_graph_runner), | |
51 namespace_token_(task_graph_runner->GetNamespaceToken()) {} | |
52 | |
53 // Overridden from base::TaskRunner: | |
54 bool PostDelayedTask(const tracked_objects::Location& from_here, | |
55 const base::Closure& task, | |
56 base::TimeDelta delay) override { | |
57 return PostNonNestableDelayedTask(from_here, task, delay); | |
58 } | |
59 bool RunsTasksOnCurrentThread() const override { return true; } | |
60 | |
61 // Overridden from base::SequencedTaskRunner: | |
62 bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here, | |
63 const base::Closure& task, | |
64 base::TimeDelta delay) override { | |
65 base::AutoLock lock(lock_); | |
66 | |
67 // Remove completed tasks. | |
68 DCHECK(completed_tasks_.empty()); | |
69 task_graph_runner_->CollectCompletedTasks(namespace_token_, | |
70 &completed_tasks_); | |
71 | |
72 tasks_.erase(tasks_.begin(), tasks_.begin() + completed_tasks_.size()); | |
73 | |
74 tasks_.push_back(make_scoped_refptr(new ClosureTask(task))); | |
75 graph_.Reset(); | |
76 for (const auto& graph_task : tasks_) { | |
77 int dependencies = 0; | |
78 if (!graph_.nodes.empty()) | |
79 dependencies = 1; | |
80 | |
81 // Treat any tasks that are enqueued through the SequencedTaskRunner as | |
82 // FOREGROUND priority. We don't have enough information to know the | |
83 // actual priority of such tasks, so we run them as soon as possible. | |
84 cc::TaskGraph::Node node(graph_task.get(), cc::TASK_CATEGORY_FOREGROUND, | |
85 0u /* priority */, dependencies); | |
86 if (dependencies) { | |
87 graph_.edges.push_back( | |
88 cc::TaskGraph::Edge(graph_.nodes.back().task, node.task)); | |
89 } | |
90 graph_.nodes.push_back(node); | |
91 } | |
92 task_graph_runner_->ScheduleTasks(namespace_token_, &graph_); | |
93 completed_tasks_.clear(); | |
94 return true; | |
95 } | |
96 | |
97 private: | |
98 ~RasterWorkerPoolSequencedTaskRunner() override { | |
99 task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_); | |
100 task_graph_runner_->CollectCompletedTasks(namespace_token_, | |
101 &completed_tasks_); | |
102 }; | |
103 | |
104 // Lock to exclusively access all the following members that are used to | |
105 // implement the SequencedTaskRunner interfaces. | |
106 base::Lock lock_; | |
107 | |
108 cc::TaskGraphRunner* task_graph_runner_; | |
109 // Namespace used to schedule tasks in the task graph runner. | |
110 cc::NamespaceToken namespace_token_; | |
111 // List of tasks currently queued up for execution. | |
112 cc::Task::Vector tasks_; | |
113 // Graph object used for scheduling tasks. | |
114 cc::TaskGraph graph_; | |
115 // Cached vector to avoid allocation when getting the list of complete | |
116 // tasks. | |
117 cc::Task::Vector completed_tasks_; | |
118 }; | |
119 | |
// All three condition variables share |lock_|, which also guards
// |work_queue_| and |shutdown_|.
RasterWorkerPool::RasterWorkerPool()
    : namespace_token_(GetNamespaceToken()),
      has_ready_to_run_foreground_tasks_cv_(&lock_),
      has_ready_to_run_background_tasks_cv_(&lock_),
      has_namespaces_with_finished_running_tasks_cv_(&lock_),
      shutdown_(false) {}
126 | |
127 void RasterWorkerPool::Start(int num_threads) { | |
128 DCHECK(threads_.empty()); | |
129 | |
130 // Start |num_threads| threads for foreground work, including nonconcurrent | |
131 // foreground work. | |
132 std::vector<cc::TaskCategory> foreground_categories; | |
133 foreground_categories.push_back(cc::TASK_CATEGORY_NONCONCURRENT_FOREGROUND); | |
134 foreground_categories.push_back(cc::TASK_CATEGORY_FOREGROUND); | |
135 | |
136 for (int i = 0; i < num_threads; i++) { | |
137 std::unique_ptr<base::SimpleThread> thread(new RasterWorkerPoolThread( | |
138 base::StringPrintf("CompositorTileWorker%u", | |
139 static_cast<unsigned>(threads_.size() + 1)) | |
140 .c_str(), | |
141 base::SimpleThread::Options(), this, foreground_categories, | |
142 &has_ready_to_run_foreground_tasks_cv_)); | |
143 thread->Start(); | |
144 threads_.push_back(std::move(thread)); | |
145 } | |
146 | |
147 // Start a single thread for background work. | |
148 std::vector<cc::TaskCategory> background_categories; | |
149 background_categories.push_back(cc::TASK_CATEGORY_BACKGROUND); | |
150 | |
151 // Use background priority for background thread. | |
152 base::SimpleThread::Options thread_options; | |
153 #if !defined(OS_MACOSX) | |
154 thread_options.set_priority(base::ThreadPriority::BACKGROUND); | |
155 #endif | |
156 | |
157 std::unique_ptr<base::SimpleThread> thread(new RasterWorkerPoolThread( | |
158 "CompositorTileWorkerBackground", thread_options, this, | |
159 background_categories, &has_ready_to_run_background_tasks_cv_)); | |
160 thread->Start(); | |
161 threads_.push_back(std::move(thread)); | |
162 } | |
163 | |
// Drains this pool's own namespace, flags shutdown under the lock, wakes all
// workers, and joins them. After this returns, no worker threads remain.
void RasterWorkerPool::Shutdown() {
  WaitForTasksToFinishRunning(namespace_token_);
  CollectCompletedTasks(namespace_token_, &completed_tasks_);
  // Shutdown raster threads.
  {
    base::AutoLock lock(lock_);

    // By this point every namespace must have drained; workers only exit
    // when there is no runnable work left.
    DCHECK(!work_queue_.HasReadyToRunTasks());
    DCHECK(!work_queue_.HasAnyNamespaces());

    DCHECK(!shutdown_);
    shutdown_ = true;

    // Wake up all workers so they exit.
    has_ready_to_run_foreground_tasks_cv_.Broadcast();
    has_ready_to_run_background_tasks_cv_.Broadcast();
  }
  // Join outside the lock so the workers' run loops can make progress.
  while (!threads_.empty()) {
    threads_.back()->Join();
    threads_.pop_back();
  }
}
186 | |
// Overridden from base::TaskRunner:
// Queues |task| as a FOREGROUND graph task in this pool's own namespace.
// NOTE: |delay| is ignored — the task becomes runnable immediately. Unlike
// the sequenced runner, tasks posted here carry no dependency edges and may
// run concurrently and complete in any order.
bool RasterWorkerPool::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    base::TimeDelta delay) {
  base::AutoLock lock(lock_);

  // Remove completed tasks.
  DCHECK(completed_tasks_.empty());
  CollectCompletedTasksWithLockAcquired(namespace_token_, &completed_tasks_);

  // Completed tasks may be in any order, so filter |tasks_| rather than
  // erasing a prefix.
  cc::Task::Vector::iterator end = std::remove_if(
      tasks_.begin(), tasks_.end(), [this](const scoped_refptr<cc::Task>& e) {
        return std::find(this->completed_tasks_.begin(),
                         this->completed_tasks_.end(),
                         e) != this->completed_tasks_.end();
      });
  tasks_.erase(end, tasks_.end());

  tasks_.push_back(make_scoped_refptr(new ClosureTask(task)));
  // Rebuild the whole graph from the still-pending tasks; ScheduleTasks
  // replaces the namespace's previous graph.
  graph_.Reset();
  for (const auto& graph_task : tasks_) {
    // Delayed tasks are assigned FOREGROUND category, ensuring that they run as
    // soon as possible once their delay has expired.
    graph_.nodes.push_back(
        cc::TaskGraph::Node(graph_task.get(), cc::TASK_CATEGORY_FOREGROUND,
                            0u /* priority */, 0u /* dependencies */));
  }

  ScheduleTasksWithLockAcquired(namespace_token_, &graph_);
  completed_tasks_.clear();
  return true;
}
220 | |
// NOTE(review): returns true unconditionally — callers cannot infer any real
// thread affinity from this; tasks run on whichever worker picks them up.
bool RasterWorkerPool::RunsTasksOnCurrentThread() const {
  return true;
}
224 | |
// Worker thread loop. Holds |lock_| except while a task body executes (the
// unlock happens inside RunTaskInCategoryWithLockAcquired). Returns only
// after |shutdown_| is set and no task in |categories| is runnable.
void RasterWorkerPool::Run(const std::vector<cc::TaskCategory>& categories,
                           base::ConditionVariable* has_ready_to_run_tasks_cv) {
  base::AutoLock lock(lock_);

  while (true) {
    if (!RunTaskWithLockAcquired(categories)) {
      // We are no longer running tasks, which may allow another category to
      // start running. Signal other worker threads.
      SignalHasReadyToRunTasksWithLockAcquired();

      // Exit when shutdown is set and no more tasks are pending.
      if (shutdown_)
        break;

      // Wait for more tasks. Wait() releases |lock_| while blocked and
      // re-acquires it before returning.
      has_ready_to_run_tasks_cv->Wait();
      continue;
    }
  }
}
245 | |
// Blocks until every namespace has finished running its tasks. Relies on
// workers signaling |has_namespaces_with_finished_running_tasks_cv_| as
// namespaces drain. Test-only.
void RasterWorkerPool::FlushForTesting() {
  base::AutoLock lock(lock_);

  while (!work_queue_.HasFinishedRunningTasksInAllNamespaces()) {
    has_namespaces_with_finished_running_tasks_cv_.Wait();
  }
}
253 | |
254 scoped_refptr<base::SequencedTaskRunner> | |
255 RasterWorkerPool::CreateSequencedTaskRunner() { | |
256 return new RasterWorkerPoolSequencedTaskRunner(this); | |
257 } | |
258 | |
259 RasterWorkerPool::~RasterWorkerPool() {} | |
260 | |
// Hands out a fresh namespace token from the work queue. Thread-safe.
cc::NamespaceToken RasterWorkerPool::GetNamespaceToken() {
  base::AutoLock lock(lock_);
  return work_queue_.GetNamespaceToken();
}
265 | |
266 void RasterWorkerPool::ScheduleTasks(cc::NamespaceToken token, | |
267 cc::TaskGraph* graph) { | |
268 TRACE_EVENT2("disabled-by-default-cc.debug", | |
269 "RasterWorkerPool::ScheduleTasks", "num_nodes", | |
270 graph->nodes.size(), "num_edges", graph->edges.size()); | |
271 { | |
272 base::AutoLock lock(lock_); | |
273 ScheduleTasksWithLockAcquired(token, graph); | |
274 } | |
275 } | |
276 | |
// Schedules |graph| under |token|; |lock_| must already be held. Wakes a
// worker since the new graph may contain runnable tasks.
void RasterWorkerPool::ScheduleTasksWithLockAcquired(cc::NamespaceToken token,
                                                     cc::TaskGraph* graph) {
  DCHECK(token.IsValid());
  DCHECK(!cc::TaskGraphWorkQueue::DependencyMismatch(graph));
  DCHECK(!shutdown_);

  work_queue_.ScheduleTasks(token, graph);

  // There may be more work available, so wake up another worker thread.
  SignalHasReadyToRunTasksWithLockAcquired();
}
288 | |
// Blocks the calling (origin) thread until every task in |token|'s namespace
// has finished running. Returns immediately if the namespace is unknown.
void RasterWorkerPool::WaitForTasksToFinishRunning(cc::NamespaceToken token) {
  TRACE_EVENT0("disabled-by-default-cc.debug",
               "RasterWorkerPool::WaitForTasksToFinishRunning");

  DCHECK(token.IsValid());

  {
    base::AutoLock lock(lock_);
    base::ThreadRestrictions::ScopedAllowWait allow_wait;

    auto* task_namespace = work_queue_.GetNamespaceForToken(token);

    if (!task_namespace)
      return;

    while (!work_queue_.HasFinishedRunningTasksInNamespace(task_namespace))
      has_namespaces_with_finished_running_tasks_cv_.Wait();

    // There may be other namespaces that have finished running tasks, so wake
    // up another origin thread. (The CV is shared by all waiting origin
    // threads, and workers Signal() rather than Broadcast().)
    has_namespaces_with_finished_running_tasks_cv_.Signal();
  }
}
312 | |
313 void RasterWorkerPool::CollectCompletedTasks( | |
314 cc::NamespaceToken token, | |
315 cc::Task::Vector* completed_tasks) { | |
316 TRACE_EVENT0("disabled-by-default-cc.debug", | |
317 "RasterWorkerPool::CollectCompletedTasks"); | |
318 | |
319 { | |
320 base::AutoLock lock(lock_); | |
321 CollectCompletedTasksWithLockAcquired(token, completed_tasks); | |
322 } | |
323 } | |
324 | |
// Appends completed tasks for |token|'s namespace to |completed_tasks|;
// |lock_| must already be held.
void RasterWorkerPool::CollectCompletedTasksWithLockAcquired(
    cc::NamespaceToken token,
    cc::Task::Vector* completed_tasks) {
  DCHECK(token.IsValid());
  work_queue_.CollectCompletedTasks(token, completed_tasks);
}
331 | |
332 bool RasterWorkerPool::RunTaskWithLockAcquired( | |
333 const std::vector<cc::TaskCategory>& categories) { | |
334 for (const auto& category : categories) { | |
335 if (ShouldRunTaskForCategoryWithLockAcquired(category)) { | |
336 RunTaskInCategoryWithLockAcquired(category); | |
337 return true; | |
338 } | |
339 } | |
340 return false; | |
341 } | |
342 | |
// Dequeues and runs the next task in |category|. |lock_| is held on entry
// and exit, but released while the task body itself executes so other
// workers and posters can make progress.
void RasterWorkerPool::RunTaskInCategoryWithLockAcquired(
    cc::TaskCategory category) {
  TRACE_EVENT0("toplevel", "TaskGraphRunner::RunTask");

  lock_.AssertAcquired();

  auto prioritized_task = work_queue_.GetNextTaskToRun(category);
  cc::Task* task = prioritized_task.task;

  // There may be more work available, so wake up another worker thread.
  SignalHasReadyToRunTasksWithLockAcquired();

  {
    // Run the task body without holding |lock_|.
    base::AutoUnlock unlock(lock_);

    task->RunOnWorkerThread();
  }

  // Lock re-acquired; mark the task complete in the queue.
  work_queue_.CompleteTask(prioritized_task);

  // If namespace has finished running all tasks, wake up origin threads.
  if (work_queue_.HasFinishedRunningTasksInNamespace(
          prioritized_task.task_namespace))
    has_namespaces_with_finished_running_tasks_cv_.Signal();
}
368 | |
// Scheduling policy: a category may run only when it has ready work, and
// (1) BACKGROUND yields entirely to any running or ready foreground work,
// (2) NONCONCURRENT_FOREGROUND allows at most one running task at a time.
// |lock_| must be held.
bool RasterWorkerPool::ShouldRunTaskForCategoryWithLockAcquired(
    cc::TaskCategory category) {
  lock_.AssertAcquired();

  if (!work_queue_.HasReadyToRunTasksForCategory(category))
    return false;

  if (category == cc::TASK_CATEGORY_BACKGROUND) {
    // Only run background tasks if there are no foreground tasks running or
    // ready to run.
    size_t num_running_foreground_tasks =
        work_queue_.NumRunningTasksForCategory(
            cc::TASK_CATEGORY_NONCONCURRENT_FOREGROUND) +
        work_queue_.NumRunningTasksForCategory(cc::TASK_CATEGORY_FOREGROUND);
    bool has_ready_to_run_foreground_tasks =
        work_queue_.HasReadyToRunTasksForCategory(
            cc::TASK_CATEGORY_NONCONCURRENT_FOREGROUND) ||
        work_queue_.HasReadyToRunTasksForCategory(cc::TASK_CATEGORY_FOREGROUND);

    if (num_running_foreground_tasks > 0 || has_ready_to_run_foreground_tasks)
      return false;
  }

  // Enforce that only one nonconcurrent task runs at a time.
  if (category == cc::TASK_CATEGORY_NONCONCURRENT_FOREGROUND &&
      work_queue_.NumRunningTasksForCategory(
          cc::TASK_CATEGORY_NONCONCURRENT_FOREGROUND) > 0) {
    return false;
  }

  return true;
}
401 | |
402 void RasterWorkerPool::SignalHasReadyToRunTasksWithLockAcquired() { | |
403 lock_.AssertAcquired(); | |
404 | |
405 if (ShouldRunTaskForCategoryWithLockAcquired(cc::TASK_CATEGORY_FOREGROUND) || | |
406 ShouldRunTaskForCategoryWithLockAcquired( | |
407 cc::TASK_CATEGORY_NONCONCURRENT_FOREGROUND)) { | |
408 has_ready_to_run_foreground_tasks_cv_.Signal(); | |
409 } | |
410 | |
411 if (ShouldRunTaskForCategoryWithLockAcquired(cc::TASK_CATEGORY_BACKGROUND)) { | |
412 has_ready_to_run_background_tasks_cv_.Signal(); | |
413 } | |
414 } | |
415 | |
// Wraps a base::Closure so it can be scheduled as a cc::Task.
RasterWorkerPool::ClosureTask::ClosureTask(const base::Closure& closure)
    : closure_(closure) {}

// Overridden from cc::Task:
void RasterWorkerPool::ClosureTask::RunOnWorkerThread() {
  closure_.Run();
  // Drop the closure immediately so anything it bound is released as soon as
  // the task has run, not when the task object is destroyed.
  closure_.Reset();
}

RasterWorkerPool::ClosureTask::~ClosureTask() {}
426 | |
427 } // namespace content | |
OLD | NEW |