Chromium Code Reviews
| Index: cc/resources/worker_pool.cc |
| diff --git a/cc/resources/worker_pool.cc b/cc/resources/worker_pool.cc |
| old mode 100644 |
| new mode 100755 |
| index 65134e21d539e75a871a0c63e191d2fec606fb0e..543a4bd40796ae11308d5dc4d031326a311398fd |
| --- a/cc/resources/worker_pool.cc |
| +++ b/cc/resources/worker_pool.cc |
| @@ -9,92 +9,45 @@ |
| #include "base/bind.h" |
| #include "base/containers/hash_tables.h" |
| #include "base/debug/trace_event.h" |
| +#include "base/lazy_instance.h" |
| +#include "base/memory/linked_ptr.h" |
| #include "base/strings/stringprintf.h" |
| #include "base/synchronization/condition_variable.h" |
| #include "base/threading/simple_thread.h" |
| #include "base/threading/thread_restrictions.h" |
| #include "cc/base/scoped_ptr_deque.h" |
| +#include "cc/base/switches.h" |
| namespace cc { |
| -namespace internal { |
| - |
| -WorkerPoolTask::WorkerPoolTask() |
| - : did_schedule_(false), |
| - did_run_(false), |
| - did_complete_(false) { |
| -} |
| - |
| -WorkerPoolTask::~WorkerPoolTask() { |
| - DCHECK_EQ(did_schedule_, did_complete_); |
| - DCHECK(!did_run_ || did_schedule_); |
| - DCHECK(!did_run_ || did_complete_); |
| -} |
| - |
| -void WorkerPoolTask::DidSchedule() { |
| - DCHECK(!did_complete_); |
| - did_schedule_ = true; |
| -} |
| - |
| -void WorkerPoolTask::WillRun() { |
| - DCHECK(did_schedule_); |
| - DCHECK(!did_complete_); |
| - DCHECK(!did_run_); |
| -} |
| - |
| -void WorkerPoolTask::DidRun() { |
| - did_run_ = true; |
| -} |
| - |
| -void WorkerPoolTask::WillComplete() { |
| - DCHECK(!did_complete_); |
| -} |
| - |
| -void WorkerPoolTask::DidComplete() { |
| - DCHECK(did_schedule_); |
| - DCHECK(!did_complete_); |
| - did_complete_ = true; |
| -} |
| - |
| -bool WorkerPoolTask::HasFinishedRunning() const { |
| - return did_run_; |
| -} |
| - |
| -bool WorkerPoolTask::HasCompleted() const { |
| - return did_complete_; |
| -} |
| - |
| -GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) |
| - : task_(task), |
| - priority_(priority), |
| - num_dependencies_(0) { |
| -} |
| - |
| -GraphNode::~GraphNode() { |
| -} |
| - |
| -} // namespace internal |
| +namespace { |
| -// Internal to the worker pool. Any data or logic that needs to be |
| -// shared between threads lives in this class. All members are guarded |
| -// by |lock_|. |
| -class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
| +// TaskGraphRunners can process task graphs from multiple |
| +// WorkerPool instances. All members are guarded by |lock_|. |
| +class TaskGraphRunner : public base::DelegateSimpleThread::Delegate { |
| public: |
| - Inner(size_t num_threads, const std::string& thread_name_prefix); |
| - virtual ~Inner(); |
| + typedef WorkerPool::TaskGraph TaskGraph; |
| + typedef WorkerPool::TaskVector TaskVector; |
| - void Shutdown(); |
| + TaskGraphRunner(size_t num_threads, const std::string& thread_name_prefix); |
| + virtual ~TaskGraphRunner(); |
| + void Register(const WorkerPool* worker_pool); |
| + void Unregister(const WorkerPool* worker_pool); |
| // Schedule running of tasks in |graph|. Tasks previously scheduled but |
| // no longer needed will be canceled unless already running. Canceled |
| - // tasks are moved to |completed_tasks_| without being run. The result |
| + // tasks are moved to |completed_tasks| without being run. The result |
| // is that once scheduled, a task is guaranteed to end up in the |
| - // |completed_tasks_| queue even if they later get canceled by another |
| + // |completed_tasks| queue even if it later gets canceled by another |
| // call to SetTaskGraph(). |
| - void SetTaskGraph(TaskGraph* graph); |
| + void SetTaskGraph(const WorkerPool* worker_pool, TaskGraph* graph); |
| + |
| + // Wait for all scheduled tasks to finish running. |
| + void WaitForTasksToFinishRunning(const WorkerPool* worker_pool); |
| // Collect all completed tasks in |completed_tasks|. |
| - void CollectCompletedTasks(TaskVector* completed_tasks); |
| + void CollectCompletedTasks(const WorkerPool* worker_pool, |
| + TaskVector* completed_tasks); |
| private: |
| static bool CompareTaskPriority(const internal::GraphNode* a, |
| @@ -107,9 +60,36 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
| return a->dependents().size() < b->dependents().size(); |
| } |
| + struct TaskNamespace { |
| + // This set contains all pending tasks. |
| + TaskGraph pending_tasks; |
| + // This set contains all currently running tasks. |
| + TaskGraph running_tasks; |
| + // Completed tasks not yet collected by origin thread. |
| + TaskVector completed_tasks; |
| + // Ordered set of tasks that are ready to run. |
| + internal::GraphNode::Vector ready_to_run_tasks; |
| + }; |
| + |
| + static bool CompareTaskNamespacePriority(const TaskNamespace* a, |
| + const TaskNamespace* b) { |
| + DCHECK(!a->ready_to_run_tasks.empty()); |
| + DCHECK(!a->ready_to_run_tasks.empty()); |
|
vmpstr (2014/01/06 20:46:31): typo: b->...
| + return CompareTaskPriority(a->ready_to_run_tasks.back(), |
|
vmpstr (2014/01/06 20:46:31): According to make_heap docs, the max element in a …
reveman (2014/01/06 21:59:39): I'm usually not a fan of putting the type in the v…
vmpstr (2014/01/06 22:07:20): I think a comment here is fine. (and possibly anyw…
sohanjg (2014/01/07 08:35:23): Done.
| + b->ready_to_run_tasks.back()); |
| + } |
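The thread above hinges on a property of the standard heap algorithms worth spelling out: after std::make_heap, the greatest element under the comparator sits at front(), and std::pop_heap moves it to back() where it can be popped off. A minimal standalone sketch (hypothetical Node type and LessPriority comparator, not part of this CL):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Node { unsigned priority; };

    // Strict weak ordering: |a| orders before |b| if it has a smaller priority.
    static bool LessPriority(const Node* a, const Node* b) {
      return a->priority < b->priority;
    }

    int main() {
      Node n1 = {1}, n2 = {5}, n3 = {3};
      std::vector<Node*> heap;
      heap.push_back(&n1);
      heap.push_back(&n2);
      heap.push_back(&n3);

      std::make_heap(heap.begin(), heap.end(), LessPriority);
      assert(heap.front()->priority == 5u);  // max element lives at front()

      std::pop_heap(heap.begin(), heap.end(), LessPriority);
      assert(heap.back()->priority == 5u);   // pop_heap moves it to back()
      heap.pop_back();
      return 0;
    }

So a comparator that inspects ready_to_run_tasks.back() is not looking at the namespace's top-priority task while that vector is in heap order; the "Done" reply above suggests this was addressed in a later patch set.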
| + |
| + typedef std::map<const WorkerPool*, linked_ptr<TaskNamespace> > |
| + TaskNamespaceMap; |
| + |
| // Overridden from base::DelegateSimpleThread: |
| virtual void Run() OVERRIDE; |
| + inline bool has_finished_running_tasks(TaskNamespace* task_namespace) { |
| + return (task_namespace->pending_tasks.empty() && |
| + task_namespace->running_tasks.empty()); |
| + } |
| + |
| // This lock protects all members of this class except |
| // |worker_pool_on_origin_thread_|. Do not read or modify anything |
| // without holding this lock. Do not block while holding this lock. |
| @@ -119,6 +99,10 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
| // tasks are ready to run or shutdown starts. |
| base::ConditionVariable has_ready_to_run_tasks_cv_; |
| + // Condition variable that is waited on by origin threads until a |
| + // namespace has finished running all associated tasks. |
| + base::ConditionVariable has_namespaces_with_finished_running_tasks_cv_; |
| + |
| // Provides each running thread loop with a unique index. First thread |
| // loop index is 0. |
| unsigned next_thread_index_; |
| @@ -127,27 +111,22 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
| // are pending. |
| bool shutdown_; |
| - // This set contains all pending tasks. |
| - GraphNodeMap pending_tasks_; |
| + // This set contains all registered namespaces. |
| + TaskNamespaceMap namespaces_; |
| - // Priority queue containing tasks that are ready to run. |
| - internal::GraphNode::Vector ready_to_run_tasks_; |
| - |
| - // This set contains all currently running tasks. |
| - GraphNodeMap running_tasks_; |
| - |
| - // Completed tasks not yet collected by origin thread. |
| - TaskVector completed_tasks_; |
| + // Ordered set of task namespaces that have ready to run tasks. |
| + std::vector<TaskNamespace*> ready_to_run_namespaces_; |
| ScopedPtrDeque<base::DelegateSimpleThread> workers_; |
| - DISALLOW_COPY_AND_ASSIGN(Inner); |
| + DISALLOW_COPY_AND_ASSIGN(TaskGraphRunner); |
| }; |
| -WorkerPool::Inner::Inner( |
| +TaskGraphRunner::TaskGraphRunner( |
| size_t num_threads, const std::string& thread_name_prefix) |
| : lock_(), |
| has_ready_to_run_tasks_cv_(&lock_), |
| + has_namespaces_with_finished_running_tasks_cv_(&lock_), |
| next_thread_index_(0), |
| shutdown_(false) { |
| base::AutoLock lock(lock_); |
| @@ -168,21 +147,13 @@ WorkerPool::Inner::Inner( |
| } |
| } |
| -WorkerPool::Inner::~Inner() { |
| - base::AutoLock lock(lock_); |
| - |
| - DCHECK(shutdown_); |
| - |
| - DCHECK_EQ(0u, pending_tasks_.size()); |
| - DCHECK_EQ(0u, ready_to_run_tasks_.size()); |
| - DCHECK_EQ(0u, running_tasks_.size()); |
| - DCHECK_EQ(0u, completed_tasks_.size()); |
| -} |
| - |
| -void WorkerPool::Inner::Shutdown() { |
| +TaskGraphRunner::~TaskGraphRunner() { |
| { |
| base::AutoLock lock(lock_); |
| + DCHECK_EQ(0u, ready_to_run_namespaces_.size()); |
| + DCHECK_EQ(0u, namespaces_.size()); |
| + |
| DCHECK(!shutdown_); |
| shutdown_ = true; |
| @@ -200,22 +171,59 @@ void WorkerPool::Inner::Shutdown() { |
| } |
| } |
| -void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { |
| - // It is OK to call SetTaskGraph() after shutdown if |graph| is empty. |
| - DCHECK(graph->empty() || !shutdown_); |
| +void TaskGraphRunner::Register(const WorkerPool* worker_pool) { |
| + base::AutoLock lock(lock_); |
| + |
| + DCHECK(namespaces_.find(worker_pool) == namespaces_.end()); |
| + linked_ptr<TaskNamespace> task_set = make_linked_ptr(new TaskNamespace()); |
|
vmpstr (2014/01/06 20:46:31): Sorry if this is a silly question, but why linked_…
reveman (2014/01/06 21:59:39): linked_ptr is preferred in this case as we'll neve…
sohanjg (2014/01/07 08:35:23): We used linked_ptr instead of ScopedPtrHashMap bec…
| + namespaces_[worker_pool] = task_set; |
| +} |
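For context on the linked_ptr discussion above: the map owns one TaskNamespace per registered WorkerPool, and erasing the map entry destroys the namespace. A rough standard-library analogue of that bookkeeping is sketched below (hypothetical Runner type; the CL itself presumably uses linked_ptr because the C++03-era containers in use could not hold move-only scoped_ptr values):

    #include <cassert>
    #include <map>
    #include <memory>

    struct TaskNamespace { /* pending_tasks, running_tasks, completed_tasks, ... */ };

    class Runner {
     public:
      void Register(const void* client) {
        assert(namespaces_.find(client) == namespaces_.end());
        namespaces_[client] = std::unique_ptr<TaskNamespace>(new TaskNamespace());
      }
      void Unregister(const void* client) {
        assert(namespaces_.find(client) != namespaces_.end());
        namespaces_.erase(client);  // the owned TaskNamespace is destroyed here
      }

     private:
      // Ownership lives in the map; clients only ever hold the raw key pointer.
      std::map<const void*, std::unique_ptr<TaskNamespace> > namespaces_;
    };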
| + |
| +void TaskGraphRunner::Unregister(const WorkerPool* worker_pool) { |
| + base::AutoLock lock(lock_); |
| + |
| + DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); |
| + DCHECK_EQ(0u, namespaces_[worker_pool]->pending_tasks.size()); |
| + DCHECK_EQ(0u, namespaces_[worker_pool]->ready_to_run_tasks.size()); |
| + DCHECK_EQ(0u, namespaces_[worker_pool]->running_tasks.size()); |
| + DCHECK_EQ(0u, namespaces_[worker_pool]->completed_tasks.size()); |
| + |
| + namespaces_.erase(worker_pool); |
| +} |
| + |
| +void TaskGraphRunner::WaitForTasksToFinishRunning( |
| + const WorkerPool* worker_pool) { |
| + base::AutoLock lock(lock_); |
| + |
| + DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); |
| + TaskNamespace* task_namespace = namespaces_[worker_pool].get(); |
| - GraphNodeMap new_pending_tasks; |
| - GraphNodeMap new_running_tasks; |
| + while (!has_finished_running_tasks(task_namespace)) |
| + has_namespaces_with_finished_running_tasks_cv_.Wait(); |
| + |
| + // There may be other namespaces that have finished running |
| + // tasks, so wake up another origin thread. |
| + has_namespaces_with_finished_running_tasks_cv_.Signal(); |
| +} |
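WaitForTasksToFinishRunning() uses the classic predicate-loop wait, plus a chained Signal() so that another origin thread blocked on a different (also finished) namespace is not left sleeping, since a single condition variable is shared by all namespaces. A sketch of the same pattern in standard C++ (hypothetical names, std:: primitives instead of the base:: ones used in the CL):

    #include <condition_variable>
    #include <functional>
    #include <mutex>

    std::mutex g_lock;
    std::condition_variable g_finished_cv;

    void WaitUntilFinished(const std::function<bool()>& has_finished) {
      std::unique_lock<std::mutex> hold(g_lock);
      while (!has_finished())      // re-check the predicate after every wakeup
        g_finished_cv.wait(hold);
      // Other waiters may be watching different namespaces that have also
      // finished; pass the notification along instead of swallowing it.
      g_finished_cv.notify_one();
    }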
| + |
| +void TaskGraphRunner::SetTaskGraph(const WorkerPool* worker_pool, |
| + TaskGraph* graph) { |
| + TaskGraph new_pending_tasks; |
| + TaskGraph new_running_tasks; |
| new_pending_tasks.swap(*graph); |
| { |
| base::AutoLock lock(lock_); |
| + DCHECK(!shutdown_); |
| + DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); |
| + TaskNamespace* task_namespace = namespaces_[worker_pool].get(); |
| + |
| // First remove all completed tasks from |new_pending_tasks| and |
| // adjust number of dependencies. |
| - for (TaskVector::iterator it = completed_tasks_.begin(); |
| - it != completed_tasks_.end(); ++it) { |
| + for (TaskVector::iterator it = task_namespace->completed_tasks.begin(); |
| + it != task_namespace->completed_tasks.end(); ++it) { |
| internal::WorkerPoolTask* task = it->get(); |
| scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( |
| @@ -231,8 +239,8 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { |
| } |
| // Build new running task set. |
| - for (GraphNodeMap::iterator it = running_tasks_.begin(); |
| - it != running_tasks_.end(); ++it) { |
| + for (TaskGraph::iterator it = task_namespace->running_tasks.begin(); |
| + it != task_namespace->running_tasks.end(); ++it) { |
| internal::WorkerPoolTask* task = it->first; |
| // Transfer scheduled task value from |new_pending_tasks| to |
| // |new_running_tasks| if currently running. Value must be set to |
| @@ -242,8 +250,8 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { |
| } |
| // Build new "ready to run" tasks queue. |
| - ready_to_run_tasks_.clear(); |
| - for (GraphNodeMap::iterator it = new_pending_tasks.begin(); |
| + task_namespace->ready_to_run_tasks.clear(); |
| + for (TaskGraph::iterator it = new_pending_tasks.begin(); |
| it != new_pending_tasks.end(); ++it) { |
| internal::WorkerPoolTask* task = it->first; |
| DCHECK(task); |
| @@ -257,60 +265,78 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { |
| task->DidSchedule(); |
| if (!node->num_dependencies()) |
| - ready_to_run_tasks_.push_back(node); |
| + task_namespace->ready_to_run_tasks.push_back(node); |
| // Erase the task from old pending tasks. |
| - pending_tasks_.erase(task); |
| + task_namespace->pending_tasks.erase(task); |
| } |
| - // Rearrange the elements in |ready_to_run_tasks_| in such a way that |
| + // Rearrange the elements in |ready_to_run_tasks| in such a way that |
| // they form a heap. |
| - std::make_heap(ready_to_run_tasks_.begin(), |
| - ready_to_run_tasks_.end(), |
| + std::make_heap(task_namespace->ready_to_run_tasks.begin(), |
| + task_namespace->ready_to_run_tasks.end(), |
| CompareTaskPriority); |
| - completed_tasks_.reserve(completed_tasks_.size() + pending_tasks_.size()); |
| + task_namespace->completed_tasks.reserve( |
| + task_namespace->completed_tasks.size() + |
| + task_namespace->pending_tasks.size()); |
| - // The items left in |pending_tasks_| need to be canceled. |
| - for (GraphNodeMap::const_iterator it = pending_tasks_.begin(); |
| - it != pending_tasks_.end(); |
| - ++it) { |
| - completed_tasks_.push_back(it->first); |
| + // The items left in |pending_tasks| need to be canceled. |
| + for (TaskGraph::const_iterator it = task_namespace->pending_tasks.begin(); |
| + it != task_namespace->pending_tasks.end(); ++it) { |
| + task_namespace->completed_tasks.push_back(it->first); |
| } |
| // Swap task sets. |
| // Note: old tasks are intentionally destroyed after releasing |lock_|. |
| - pending_tasks_.swap(new_pending_tasks); |
| - running_tasks_.swap(new_running_tasks); |
| + task_namespace->pending_tasks.swap(new_pending_tasks); |
| + task_namespace->running_tasks.swap(new_running_tasks); |
| - // If |ready_to_run_tasks_| is empty, it means we either have |
| + // If |ready_to_run_tasks| is empty, it means we either have |
| // running tasks, or we have no pending tasks. |
| - DCHECK(!ready_to_run_tasks_.empty() || |
| - (pending_tasks_.empty() || !running_tasks_.empty())); |
| + DCHECK(!task_namespace->ready_to_run_tasks.empty() || |
| + (task_namespace->pending_tasks.empty() || |
| + !task_namespace->running_tasks.empty())); |
| + |
| + // Build new "ready to run" task namespaces queue. |
| + ready_to_run_namespaces_.clear(); |
| + for (TaskNamespaceMap::iterator it = namespaces_.begin(); |
| + it != namespaces_.end(); ++it) { |
| + if (!it->second->ready_to_run_tasks.empty()) |
| + ready_to_run_namespaces_.push_back(it->second.get()); |
| + } |
| + |
| + // Rearrange the task namespaces in |ready_to_run_namespaces_| |
| + // in such a way that they form a heap. |
| + std::make_heap(ready_to_run_namespaces_.begin(), |
| + ready_to_run_namespaces_.end(), |
| + CompareTaskNamespacePriority); |
| // If there is more work available, wake up worker thread. |
| - if (!ready_to_run_tasks_.empty()) |
| + if (!ready_to_run_namespaces_.empty()) |
| has_ready_to_run_tasks_cv_.Signal(); |
| } |
| } |
| -void WorkerPool::Inner::CollectCompletedTasks(TaskVector* completed_tasks) { |
| +void TaskGraphRunner::CollectCompletedTasks( |
| + const WorkerPool* worker_pool, TaskVector* completed_tasks) { |
| base::AutoLock lock(lock_); |
| DCHECK_EQ(0u, completed_tasks->size()); |
| - completed_tasks->swap(completed_tasks_); |
| + DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); |
| + completed_tasks->swap(namespaces_[worker_pool]->completed_tasks); |
| } |
| -void WorkerPool::Inner::Run() { |
| +void TaskGraphRunner::Run() { |
| base::AutoLock lock(lock_); |
| // Get a unique thread index. |
| int thread_index = next_thread_index_++; |
| while (true) { |
| - if (ready_to_run_tasks_.empty()) { |
| + if (ready_to_run_namespaces_.empty()) { |
| // Exit when shutdown is set and no more tasks are pending. |
| - if (shutdown_ && pending_tasks_.empty()) |
| + if (shutdown_) |
| break; |
| // Wait for more tasks. |
| @@ -318,18 +344,37 @@ void WorkerPool::Inner::Run() { |
| continue; |
| } |
| - // Take top priority task from |ready_to_run_tasks_|. |
| - std::pop_heap(ready_to_run_tasks_.begin(), |
| - ready_to_run_tasks_.end(), |
| + // Take top priority TaskNamespace from |ready_to_run_namespaces_|. |
| + std::pop_heap(ready_to_run_namespaces_.begin(), |
| + ready_to_run_namespaces_.end(), |
| + CompareTaskNamespacePriority); |
| + TaskNamespace* task_namespace = ready_to_run_namespaces_.back(); |
| + ready_to_run_namespaces_.pop_back(); |
| + DCHECK(!task_namespace->ready_to_run_tasks.empty()); |
| + |
| + // Take top priority task from |ready_to_run_tasks|. |
| + std::pop_heap(task_namespace->ready_to_run_tasks.begin(), |
| + task_namespace->ready_to_run_tasks.end(), |
| CompareTaskPriority); |
| scoped_refptr<internal::WorkerPoolTask> task( |
| - ready_to_run_tasks_.back()->task()); |
| - ready_to_run_tasks_.pop_back(); |
| + task_namespace->ready_to_run_tasks.back()->task()); |
| + task_namespace->ready_to_run_tasks.pop_back(); |
| + |
| + // Add task namespace back to |ready_to_run_namespaces_| if not |
| + // empty after taking top priority task. |
| + if (!task_namespace->ready_to_run_tasks.empty()) { |
| + ready_to_run_namespaces_.push_back(task_namespace); |
| + std::push_heap(ready_to_run_namespaces_.begin(), |
| + ready_to_run_namespaces_.end(), |
| + CompareTaskNamespacePriority); |
| + } |
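The loop above keeps two vectors in heap order and uses them as mutable priority queues: take the top namespace, take its top task, then re-insert the namespace if it still has ready tasks. Using a raw vector with the <algorithm> heap functions (rather than std::priority_queue) is presumably what makes the periodic rebuild in SetTaskGraph() and this re-insertion easy to express. The round-trip idiom, sketched with ints standing in for graph nodes (hypothetical helper names):

    #include <algorithm>
    #include <vector>

    // Remove and return the current top (max) element of a heap-ordered vector.
    int TakeTop(std::vector<int>* heap) {
      std::pop_heap(heap->begin(), heap->end());  // move the max to the back
      int top = heap->back();
      heap->pop_back();
      return top;
    }

    // Insert a value and restore the heap property.
    void Put(std::vector<int>* heap, int value) {
      heap->push_back(value);
      std::push_heap(heap->begin(), heap->end());
    }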
| - // Move task from |pending_tasks_| to |running_tasks_|. |
| - DCHECK(pending_tasks_.contains(task.get())); |
| - DCHECK(!running_tasks_.contains(task.get())); |
| - running_tasks_.set(task.get(), pending_tasks_.take_and_erase(task.get())); |
| + // Move task from |pending_tasks| to |running_tasks|. |
| + DCHECK(task_namespace->pending_tasks.contains(task.get())); |
| + DCHECK(!task_namespace->running_tasks.contains(task.get())); |
| + task_namespace->running_tasks.set( |
| + task.get(), |
| + task_namespace->pending_tasks.take_and_erase(task.get())); |
| // There may be more work available, so wake up another worker thread. |
| has_ready_to_run_tasks_cv_.Signal(); |
| @@ -348,8 +393,8 @@ void WorkerPool::Inner::Run() { |
| // Now iterate over all dependents to remove dependency and check |
| // if they are ready to run. |
| - scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase( |
| - task.get()); |
| + scoped_ptr<internal::GraphNode> node = |
| + task_namespace->running_tasks.take_and_erase(task.get()); |
| if (node) { |
|
reveman (2014/01/06 21:59:39): how about this here:
    bool ready_to_run_namespaces_…
sohanjg (2014/01/07 08:35:23): Done.
| for (internal::GraphNode::Vector::const_iterator it = |
| node->dependents().begin(); |
| @@ -360,16 +405,34 @@ void WorkerPool::Inner::Run() { |
| // Task is ready if it has no dependencies. Add it to |
| // |ready_to_run_tasks_|. |
| if (!dependent_node->num_dependencies()) { |
| - ready_to_run_tasks_.push_back(dependent_node); |
| - std::push_heap(ready_to_run_tasks_.begin(), |
| - ready_to_run_tasks_.end(), |
| + bool was_empty = task_namespace->ready_to_run_tasks.empty(); |
| + task_namespace->ready_to_run_tasks.push_back(dependent_node); |
| + std::push_heap(task_namespace->ready_to_run_tasks.begin(), |
| + task_namespace->ready_to_run_tasks.end(), |
| CompareTaskPriority); |
| + // Task namespace is ready if it has at least one ready |
| + // to run task. Add it to |ready_to_run_namespaces_| if |
| + // it just became ready. |
| + if (was_empty) { |
| + DCHECK(std::find(ready_to_run_namespaces_.begin(), |
| + ready_to_run_namespaces_.end(), |
| + task_namespace) == |
| + ready_to_run_namespaces_.end()); |
| + ready_to_run_namespaces_.push_back(task_namespace); |
| + std::push_heap(ready_to_run_namespaces_.begin(), |
|
vmpstr (2014/01/06 20:46:31): I think this has the same problem as before with t…
reveman (2014/01/06 21:59:39): Oh, I completely missed that this was never fixed.…
vmpstr (2014/01/06 22:07:20): Sounds good to me.
sohanjg (2014/01/07 08:35:23): Done.
| + ready_to_run_namespaces_.end(), |
| + CompareTaskNamespacePriority); |
| + } |
| } |
| } |
|
reveman (2014/01/06 21:59:39): and this here:
    if (!ready_to_run_namespaces_has_he…
sohanjg (2014/01/07 08:35:23): Done.
| } |
| // Finally add task to |completed_tasks_|. |
| - completed_tasks_.push_back(task); |
| + task_namespace->completed_tasks.push_back(task); |
| + |
| + // If namespace has finished running all tasks, wake up origin thread. |
| + if (has_finished_running_tasks(task_namespace)) |
| + has_namespaces_with_finished_running_tasks_cv_.Signal(); |
| } |
| // We noticed we should exit. Wake up the next worker so it knows it should |
| @@ -377,13 +440,83 @@ void WorkerPool::Inner::Run() { |
| has_ready_to_run_tasks_cv_.Signal(); |
| } |
| -WorkerPool::WorkerPool(size_t num_threads, |
| - const std::string& thread_name_prefix) |
| - : in_dispatch_completion_callbacks_(false), |
| - inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) { |
| +class CC_EXPORT CompositorRasterTaskGraphRunner |
| + : public TaskGraphRunner { |
| + public: |
| + CompositorRasterTaskGraphRunner() : TaskGraphRunner( |
| + WorkerPool::GetNumRasterThreads(), "CompositorRaster") { |
| + } |
| +}; |
| + |
| +base::LazyInstance<CompositorRasterTaskGraphRunner> |
| + g_task_graph_runner = LAZY_INSTANCE_INITIALIZER; |
| + |
| +} // namespace |
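base::LazyInstance gives every WorkerPool in the process the same TaskGraphRunner: the runner is constructed on first use and shared thereafter, replacing the per-pool Inner threads. A minimal sketch of the same pattern (hypothetical Counter type; the LazyInstance API is the one included at the top of this file):

    #include "base/lazy_instance.h"

    struct Counter {
      Counter() : value(0) {}
      int value;
    };

    base::LazyInstance<Counter> g_counter = LAZY_INSTANCE_INITIALIZER;

    void Increment() {
      g_counter.Pointer()->value++;  // first call constructs the shared Counter
    }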
| + |
| +namespace internal { |
| + |
| +WorkerPoolTask::WorkerPoolTask() |
| + : did_schedule_(false), |
| + did_run_(false), |
| + did_complete_(false) { |
| +} |
| + |
| +WorkerPoolTask::~WorkerPoolTask() { |
| + DCHECK_EQ(did_schedule_, did_complete_); |
| + DCHECK(!did_run_ || did_schedule_); |
| + DCHECK(!did_run_ || did_complete_); |
| +} |
| + |
| +void WorkerPoolTask::DidSchedule() { |
| + DCHECK(!did_complete_); |
| + did_schedule_ = true; |
| +} |
| + |
| +void WorkerPoolTask::WillRun() { |
| + DCHECK(did_schedule_); |
| + DCHECK(!did_complete_); |
| + DCHECK(!did_run_); |
| +} |
| + |
| +void WorkerPoolTask::DidRun() { |
| + did_run_ = true; |
| +} |
| + |
| +void WorkerPoolTask::WillComplete() { |
| + DCHECK(!did_complete_); |
| +} |
| + |
| +void WorkerPoolTask::DidComplete() { |
| + DCHECK(did_schedule_); |
| + DCHECK(!did_complete_); |
| + did_complete_ = true; |
| +} |
| + |
| +bool WorkerPoolTask::HasFinishedRunning() const { |
| + return did_run_; |
| +} |
| + |
| +bool WorkerPoolTask::HasCompleted() const { |
| + return did_complete_; |
| +} |
| + |
| +GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) |
| + : task_(task), |
| + priority_(priority), |
| + num_dependencies_(0) { |
| +} |
| + |
| +GraphNode::~GraphNode() { |
| +} |
| + |
| +} // namespace internal |
| + |
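The DCHECKs in the WorkerPoolTask methods above encode a strict per-task lifecycle: a task is scheduled at most once, runs on a worker thread, and completes exactly once on the origin thread. The expected call order, sketched as comments (the Run/Complete virtuals are assumed from the header, which is not part of this diff):

    // Hypothetical walkthrough (not runnable on its own; WorkerPoolTask is
    // abstract and RunOnWorkerThread/CompleteOnOriginThread are assumed names):
    //
    //   task->DidSchedule();             // origin thread, when the graph is set
    //   task->WillRun();                 // worker thread, just before running
    //   task->RunOnWorkerThread(index);  // assumed virtual doing the actual work
    //   task->DidRun();
    //   task->WillComplete();            // origin thread
    //   task->CompleteOnOriginThread();  // assumed virtual
    //   task->DidComplete();             // pairs with DidSchedule()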
| +WorkerPool::WorkerPool() : in_dispatch_completion_callbacks_(false) { |
| + g_task_graph_runner.Pointer()->Register(this); |
| } |
| WorkerPool::~WorkerPool() { |
| + g_task_graph_runner.Pointer()->Unregister(this); |
| } |
| void WorkerPool::Shutdown() { |
| @@ -391,7 +524,7 @@ void WorkerPool::Shutdown() { |
| DCHECK(!in_dispatch_completion_callbacks_); |
| - inner_->Shutdown(); |
| + g_task_graph_runner.Pointer()->WaitForTasksToFinishRunning(this); |
| } |
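With the shared runner, a WorkerPool no longer owns threads: the constructor registers it, Shutdown() only waits for its outstanding tasks, and the destructor unregisters it. The intended client-side sequence, sketched with a hypothetical subclass name (subclasses are not part of this diff):

    // scoped_ptr<RasterWorkerPool> pool = ...;  // ctor -> TaskGraphRunner::Register(this)
    // pool->SetTaskGraph(&graph);               // tasks queued under this pool's namespace
    // pool->CheckForCompletedTasks();           // CollectCompletedTasks(this, ...)
    // pool->Shutdown();                         // WaitForTasksToFinishRunning(this)
    // pool.reset();                             // dtor -> TaskGraphRunner::Unregister(this)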
| void WorkerPool::CheckForCompletedTasks() { |
| @@ -400,7 +533,7 @@ void WorkerPool::CheckForCompletedTasks() { |
| DCHECK(!in_dispatch_completion_callbacks_); |
| TaskVector completed_tasks; |
| - inner_->CollectCompletedTasks(&completed_tasks); |
| + g_task_graph_runner.Pointer()->CollectCompletedTasks(this, &completed_tasks); |
| ProcessCompletedTasks(completed_tasks); |
| } |
| @@ -431,7 +564,7 @@ void WorkerPool::SetTaskGraph(TaskGraph* graph) { |
| DCHECK(!in_dispatch_completion_callbacks_); |
| - inner_->SetTaskGraph(graph); |
| + g_task_graph_runner.Pointer()->SetTaskGraph(this, graph); |
| } |
| } // namespace cc |