Chromium Code Reviews
| Index: content/renderer/raster_worker_pool.cc |
| diff --git a/content/renderer/raster_worker_pool.cc b/content/renderer/raster_worker_pool.cc |
| index 6f5687e8cf2113b3e80ecfbfdaa0f15d623daad9..b8b13cdeff9edc900f19246d262ac1d172f8d05d 100644 |
| --- a/content/renderer/raster_worker_pool.cc |
| +++ b/content/renderer/raster_worker_pool.cc |
| @@ -5,6 +5,8 @@ |
| #include "content/renderer/raster_worker_pool.h" |
| #include "base/strings/stringprintf.h" |
| +#include "base/threading/thread_restrictions.h" |
| +#include "base/trace_event/trace_event.h" |
| namespace content { |
| @@ -12,7 +14,8 @@ namespace content { |
| class RasterWorkerPool::RasterWorkerPoolSequencedTaskRunner |
| : public base::SequencedTaskRunner { |
| public: |
| - RasterWorkerPoolSequencedTaskRunner(cc::TaskGraphRunner* task_graph_runner) |
| + explicit RasterWorkerPoolSequencedTaskRunner( |
| + cc::TaskGraphRunner* task_graph_runner) |
| : task_graph_runner_(task_graph_runner), |
| namespace_token_(task_graph_runner->GetNamespaceToken()) {} |
| @@ -63,11 +66,11 @@ class RasterWorkerPool::RasterWorkerPoolSequencedTaskRunner |
| &completed_tasks_); |
| }; |
| - cc::TaskGraphRunner* const task_graph_runner_; |
| - |
| // Lock to exclusively access all the following members that are used to |
| // implement the SequencedTaskRunner interfaces. |
| base::Lock lock_; |
| + |
| + cc::TaskGraphRunner* task_graph_runner_; |
| // Namespace used to schedule tasks in the task graph runner. |
| cc::NamespaceToken namespace_token_; |
| // List of tasks currently queued up for execution. |
| @@ -80,7 +83,10 @@ class RasterWorkerPool::RasterWorkerPoolSequencedTaskRunner |
| }; |
| RasterWorkerPool::RasterWorkerPool() |
| - : namespace_token_(task_graph_runner_.GetNamespaceToken()) {} |
| + : namespace_token_(GetNamespaceToken()), |
| + has_ready_to_run_tasks_cv_(&lock_), |
| + has_namespaces_with_finished_running_tasks_cv_(&lock_), |
| + shutdown_(false) {} |
| void RasterWorkerPool::Start( |
| int num_threads, |
| @@ -99,10 +105,22 @@ void RasterWorkerPool::Start( |
| } |
| void RasterWorkerPool::Shutdown() { |
| - task_graph_runner_.WaitForTasksToFinishRunning(namespace_token_); |
| - task_graph_runner_.CollectCompletedTasks(namespace_token_, &completed_tasks_); |
| + WaitForTasksToFinishRunning(namespace_token_); |
| + CollectCompletedTasks(namespace_token_, &completed_tasks_); |
| // Shutdown raster threads. |
| - task_graph_runner_.Shutdown(); |
| + { |
| + base::AutoLock lock(lock_); |
| + |
| + DCHECK(!work_queue_.HasReadyToRunTasks()); |
| + DCHECK(!work_queue_.HasAnyNamespaces()); |
| + |
| + DCHECK(!shutdown_); |
| + shutdown_ = true; |
| + |
| + // Wake up a worker so it knows it should exit. This will cause all workers |
| + // to exit as each will wake up another worker before exiting. |
| + has_ready_to_run_tasks_cv_.Signal(); |
|
no sievers
2015/11/25 00:03:34
why not Broadcast()?
ericrk
2015/11/25 02:09:52
no reason - that does clean things up a bit.
|
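(A sketch of the revision the thread above converges on, using this patch's members: with Broadcast() every sleeping worker wakes at once, so no worker has to re-signal the next one before exiting.)

    // Inside Shutdown(), replacing the single Signal():
    base::AutoLock lock(lock_);
    DCHECK(!shutdown_);
    shutdown_ = true;
    // Wake every worker at once; each observes |shutdown_| with no ready
    // tasks remaining and exits.
    has_ready_to_run_tasks_cv_.Broadcast();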
| + } |
| while (!threads_.empty()) { |
| threads_.back()->Join(); |
| threads_.pop_back(); |
| @@ -118,7 +136,7 @@ bool RasterWorkerPool::PostDelayedTask( |
| // Remove completed tasks. |
| DCHECK(completed_tasks_.empty()); |
| - task_graph_runner_.CollectCompletedTasks(namespace_token_, &completed_tasks_); |
| + CollectCompletedTasksWithLockAcquired(namespace_token_, &completed_tasks_); |
| cc::Task::Vector::iterator end = std::remove_if( |
| tasks_.begin(), tasks_.end(), [this](const scoped_refptr<cc::Task>& e) { |
| @@ -133,7 +151,7 @@ bool RasterWorkerPool::PostDelayedTask( |
| for (const auto& graph_task : tasks_) |
| graph_.nodes.push_back(cc::TaskGraph::Node(graph_task.get(), 0, 0)); |
| - task_graph_runner_.ScheduleTasks(namespace_token_, &graph_); |
| + ScheduleTasksWithLockAcquired(namespace_token_, &graph_); |
| completed_tasks_.clear(); |
| return true; |
| } |
| @@ -144,16 +162,142 @@ bool RasterWorkerPool::RunsTasksOnCurrentThread() const { |
| // Overridden from base::DelegateSimpleThread::Delegate: |
| void RasterWorkerPool::Run() { |
| - task_graph_runner_.Run(); |
| + base::AutoLock lock(lock_); |
| + |
| + while (true) { |
| + if (!work_queue_.HasReadyToRunTasks()) { |
| + // Exit when shutdown is set and no more tasks are pending. |
| + if (shutdown_) |
| + break; |
| + |
| + // Wait for more tasks. |
| + has_ready_to_run_tasks_cv_.Wait(); |
| + continue; |
| + } |
| + |
| + RunTaskWithLockAcquired(); |
| + } |
| + |
| + // We noticed we should exit. Wake up the next worker so it knows it should |
| + // exit as well (because the Shutdown() code only signals once). |
| + has_ready_to_run_tasks_cv_.Signal(); |
|
ericrk
2015/11/25 02:09:52
We no longer need this with the broadcast.
|
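(With Shutdown() broadcasting, the loop above can drop its trailing Signal(); a sketch of the resulting worker loop, assuming the same members:)

    void RasterWorkerPool::Run() {
      base::AutoLock lock(lock_);
      while (true) {
        if (!work_queue_.HasReadyToRunTasks()) {
          // Exit when shutdown is set and no more tasks are pending.
          if (shutdown_)
            break;
          // Wait for more tasks or for shutdown.
          has_ready_to_run_tasks_cv_.Wait();
          continue;
        }
        RunTaskWithLockAcquired();
      }
      // Nothing to re-signal here: Shutdown()'s Broadcast() wakes all workers.
    }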
| +} |
| + |
| +void RasterWorkerPool::FlushForTesting() { |
| + base::AutoLock lock(lock_); |
| + |
| + while (!work_queue_.HasFinishedRunningTasksInAllNamespaces()) { |
| + has_namespaces_with_finished_running_tasks_cv_.Wait(); |
| + } |
| } |
| scoped_refptr<base::SequencedTaskRunner> |
| RasterWorkerPool::CreateSequencedTaskRunner() { |
| - return new RasterWorkerPoolSequencedTaskRunner(&task_graph_runner_); |
| + return new RasterWorkerPoolSequencedTaskRunner(this); |
| } |
| RasterWorkerPool::~RasterWorkerPool() {} |
| +cc::NamespaceToken RasterWorkerPool::GetNamespaceToken() { |
| + base::AutoLock lock(lock_); |
| + return work_queue_.GetNamespaceToken(); |
| +} |
| + |
| +void RasterWorkerPool::ScheduleTasks(cc::NamespaceToken token, |
| + cc::TaskGraph* graph) { |
| + TRACE_EVENT2("cc", "RasterWorkerPool::ScheduleTasks", "num_nodes", |
| + graph->nodes.size(), "num_edges", graph->edges.size()); |
| + { |
| + base::AutoLock lock(lock_); |
| + ScheduleTasksWithLockAcquired(token, graph); |
| + } |
| +} |
| + |
| +void RasterWorkerPool::ScheduleTasksWithLockAcquired(cc::NamespaceToken token, |
| + cc::TaskGraph* graph) { |
| + DCHECK(token.IsValid()); |
| + DCHECK(!cc::TaskGraphWorkQueue::DependencyMismatch(graph)); |
| + DCHECK(!shutdown_); |
| + |
| + work_queue_.ScheduleTasks(token, graph); |
| + |
| + // If there is more work available, wake up worker thread. |
| + if (work_queue_.HasReadyToRunTasks()) |
| + has_ready_to_run_tasks_cv_.Signal(); |
| +} |
| + |
| +void RasterWorkerPool::WaitForTasksToFinishRunning(cc::NamespaceToken token) { |
| + TRACE_EVENT0("cc", "RasterWorkerPool::WaitForTasksToFinishRunning"); |
| + |
| + DCHECK(token.IsValid()); |
| + |
| + { |
| + base::AutoLock lock(lock_); |
| + base::ThreadRestrictions::ScopedAllowWait allow_wait; |
| + |
| + auto* task_namespace = work_queue_.GetNamespaceForToken(token); |
| + |
| + if (!task_namespace) |
| + return; |
| + |
| + while (!work_queue_.HasFinishedRunningTasksInNamespace(task_namespace)) |
| + has_namespaces_with_finished_running_tasks_cv_.Wait(); |
| + |
| + // There may be other namespaces that have finished running tasks, so wake |
| + // up another origin thread. |
| + has_namespaces_with_finished_running_tasks_cv_.Signal(); |
| + } |
| +} |
| + |
| +void RasterWorkerPool::CollectCompletedTasks( |
| + cc::NamespaceToken token, |
| + cc::Task::Vector* completed_tasks) { |
| + TRACE_EVENT0("cc", "RasterWorkerPool::CollectCompletedTasks"); |
| + |
| + { |
| + base::AutoLock lock(lock_); |
| + CollectCompletedTasksWithLockAcquired(token, completed_tasks); |
| + } |
| +} |
| + |
| +void RasterWorkerPool::CollectCompletedTasksWithLockAcquired( |
| + cc::NamespaceToken token, |
| + cc::Task::Vector* completed_tasks) { |
| + DCHECK(token.IsValid()); |
| + work_queue_.CollectCompletedTasks(token, completed_tasks); |
| +} |
| + |
| +void RasterWorkerPool::RunTaskWithLockAcquired() { |
| + TRACE_EVENT0("toplevel", "TaskGraphRunner::RunTask"); |
| + |
| + lock_.AssertAcquired(); |
| + |
| + auto prioritized_task = work_queue_.GetNextTaskToRun(); |
| + cc::Task* task = prioritized_task.task; |
| + |
| + // There may be more work available, so wake up another worker thread. |
|
no sievers
2015/11/25 00:03:34
can you check if there is actually more work? seems wasteful to wake a worker otherwise.
ericrk
2015/11/25 02:09:52
Done.
|
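(The fix presumably mirrors the guard already used in ScheduleTasksWithLockAcquired() further down; a sketch:)

    // Inside RunTaskWithLockAcquired(): only wake another worker if the
    // queue still has runnable tasks after this one was dequeued.
    if (work_queue_.HasReadyToRunTasks())
      has_ready_to_run_tasks_cv_.Signal();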
| + has_ready_to_run_tasks_cv_.Signal(); |
| + |
| + // Call WillRun() before releasing |lock_| and running task. |
| + task->WillRun(); |
| + |
| + { |
| + base::AutoUnlock unlock(lock_); |
| + |
| + task->RunOnWorkerThread(); |
| + } |
| + |
| + // This will mark task as finished running. |
| + task->DidRun(); |
| + |
| + work_queue_.CompleteTask(prioritized_task); |
| + |
| + // If namespace has finished running all tasks, wake up origin thread. |
|
no sievers
2015/11/25 00:03:34
there will only ever be one thread waiting right?
ericrk
2015/11/25 02:09:52
Right now there's nothing that stops multiple threads from waiting.
reveman
2015/11/25 02:30:07
There can be multiple threads waiting. I would recommend using Broadcast() here instead.
|
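(A sketch of what reveman appears to recommend: since several origin threads can wait on this condition variable, Broadcast() lets each re-check its own namespace, and the re-signal inside WaitForTasksToFinishRunning() becomes unnecessary.)

    // Inside RunTaskWithLockAcquired():
    if (work_queue_.HasFinishedRunningTasksInNamespace(
            prioritized_task.task_namespace))
      has_namespaces_with_finished_running_tasks_cv_.Broadcast();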
| + if (work_queue_.HasFinishedRunningTasksInNamespace( |
| + prioritized_task.task_namespace)) |
| + has_namespaces_with_finished_running_tasks_cv_.Signal(); |
| +} |
| + |
| RasterWorkerPool::ClosureTask::ClosureTask(const base::Closure& closure) |
| : closure_(closure) {} |
| @@ -161,7 +305,7 @@ RasterWorkerPool::ClosureTask::ClosureTask(const base::Closure& closure) |
| void RasterWorkerPool::ClosureTask::RunOnWorkerThread() { |
| closure_.Run(); |
| closure_.Reset(); |
| -}; |
| +} |
| RasterWorkerPool::ClosureTask::~ClosureTask() {} |