Chromium Code Reviews
| Index: cc/resources/worker_pool.cc |
| diff --git a/cc/resources/worker_pool.cc b/cc/resources/worker_pool.cc |
| old mode 100644 |
| new mode 100755 |
| index dca0c704f09dbc2559bf3f4adf860724ce9a6240..e6fe57baedea3cfdbc3f1e397787d402e16b19d6 |
| --- a/cc/resources/worker_pool.cc |
| +++ b/cc/resources/worker_pool.cc |
| @@ -8,94 +8,50 @@ |
| #include <queue> |
| #include "base/bind.h" |
| +#include "base/command_line.h" |
| #include "base/containers/hash_tables.h" |
| #include "base/debug/trace_event.h" |
| +#include "base/lazy_instance.h" |
| #include "base/strings/stringprintf.h" |
| #include "base/synchronization/condition_variable.h" |
| #include "base/threading/simple_thread.h" |
| #include "base/threading/thread_restrictions.h" |
| #include "cc/base/scoped_ptr_deque.h" |
| +#include "cc/base/switches.h" |
| namespace cc { |
| -namespace internal { |
| - |
| -WorkerPoolTask::WorkerPoolTask() |
| - : did_schedule_(false), |
| - did_run_(false), |
| - did_complete_(false) { |
| -} |
| - |
| -WorkerPoolTask::~WorkerPoolTask() { |
| - DCHECK_EQ(did_schedule_, did_complete_); |
| - DCHECK(!did_run_ || did_schedule_); |
| - DCHECK(!did_run_ || did_complete_); |
| -} |
| - |
| -void WorkerPoolTask::DidSchedule() { |
| - DCHECK(!did_complete_); |
| - did_schedule_ = true; |
| -} |
| - |
| -void WorkerPoolTask::WillRun() { |
| - DCHECK(did_schedule_); |
| - DCHECK(!did_complete_); |
| - DCHECK(!did_run_); |
| -} |
| - |
| -void WorkerPoolTask::DidRun() { |
| - did_run_ = true; |
| -} |
| - |
| -void WorkerPoolTask::WillComplete() { |
| - DCHECK(!did_complete_); |
| -} |
| - |
| -void WorkerPoolTask::DidComplete() { |
| - DCHECK(did_schedule_); |
| - DCHECK(!did_complete_); |
| - did_complete_ = true; |
| -} |
| - |
| -bool WorkerPoolTask::HasFinishedRunning() const { |
| - return did_run_; |
| -} |
| - |
| -bool WorkerPoolTask::HasCompleted() const { |
| - return did_complete_; |
| -} |
| - |
| -GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) |
| - : task_(task), |
| - priority_(priority), |
| - num_dependencies_(0) { |
| -} |
| - |
| -GraphNode::~GraphNode() { |
| -} |
| - |
| -} // namespace internal |
| +namespace { |
| // Internal to the worker pool. Any data or logic that needs to be |
| // shared between threads lives in this class. All members are guarded |
| // by |lock_|. |
| -class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
| +class WorkerInner : public base::DelegateSimpleThread::Delegate { |
| public: |
| - Inner(size_t num_threads, const std::string& thread_name_prefix); |
| - virtual ~Inner(); |
| + WorkerInner(size_t num_threads, const std::string& thread_name_prefix); |
| + virtual ~WorkerInner(); |
| void Shutdown(); |
| + typedef base::ScopedPtrHashMap<internal::WorkerPoolTask*, internal::GraphNode> |
| + GraphNodeMap; |
| + typedef GraphNodeMap TaskGraph; |
| + typedef base::ScopedPtrHashMap<WorkerPool*, GraphNodeMap> |
| + TaskMap; |
| + typedef std::vector<scoped_refptr<internal::WorkerPoolTask> > TaskVector; |
| + |
| // Schedule running of tasks in |graph|. Tasks previously scheduled but |
| // no longer needed will be canceled unless already running. Canceled |
| // tasks are moved to |completed_tasks_| without being run. The result |
| // is that once scheduled, a task is guaranteed to end up in the |
| // |completed_tasks_| queue even if they later get canceled by another |
| // call to SetTaskGraph(). |
| - void SetTaskGraph(TaskGraph* graph); |
| + |
| + void SetTaskGraph(TaskGraph* graph, WorkerPool* worker_pool); |
| // Collect all completed tasks in |completed_tasks|. |
| - void CollectCompletedTasks(TaskVector* completed_tasks); |
| + void CollectCompletedTasks(TaskVector* completed_tasks, WorkerPool* worker_pool); |
| + |
| private: |
| class PriorityComparator { |
| @@ -111,6 +67,39 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
| } |
| }; |
| + // Ordered set of tasks that are ready to run. |
| + typedef std::priority_queue<internal::GraphNode*, |
| + std::vector<internal::GraphNode*>, |
| + PriorityComparator> TaskQueue; |
| + // TaskQueue ready_to_run_tasks_; |
| + |
| +class TaskSet { |
| + public: |
| + GraphNodeMap pending_tasks_; |
| + GraphNodeMap running_tasks_; |
| + TaskVector completed_tasks_; |
| + TaskQueue ready_to_run_tasks_; |
| +}; |
| + |
| +class TaskComparator { |
| + public: |
| + bool operator()(const TaskSet* a, const TaskSet* b) { |
| + if (a->ready_to_run_tasks_.top()->priority() != b->ready_to_run_tasks_.top()->priority()) |
| + return a->ready_to_run_tasks_.top()->priority() > b->ready_to_run_tasks_.top()->priority(); |
| + |
| + return a->ready_to_run_tasks_.top()->dependents().size() > b->ready_to_run_tasks_.top()->dependents().size(); |
| + } |
| + }; |
| + |
| + typedef std::map<const WorkerPool*, TaskSet*> TaskMapper; |
|
reveman
2013/12/09 20:19:18
Could you just store TaskSets by value instead of
sohanjg
2013/12/10 07:19:37
We cannot use values, as we discussed in earlier i
|
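reveman's by-value suggestion above would let the map own every TaskSet outright, with no manual delete and no dangling entry. A minimal sketch of that ownership model, using simplified stand-in types (the real TaskSet holds a ScopedPtrHashMap and a priority_queue of GraphNode pointers, which is presumably the copyability problem sohanjg alludes to):

    #include <map>
    #include <queue>
    #include <vector>

    class WorkerPool {};

    struct TaskSet {
      std::vector<int> completed_tasks;             // stand-in for TaskVector
      std::priority_queue<int> ready_to_run_tasks;  // stand-in for TaskQueue
    };

    // By-value storage: the map owns every TaskSet, so there is no raw
    // pointer to delete and no dangling entry to worry about.
    std::map<const WorkerPool*, TaskSet> tasks;

    int main() {
      WorkerPool pool;
      // operator[] default-constructs the TaskSet in place on first use.
      TaskSet& task_set = tasks[&pool];
      task_set.ready_to_run_tasks.push(1);
      return 0;
    }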
| + |
| + TaskMapper tasks_; |
| + |
| + typedef std::priority_queue<TaskSet*, |
| + std::vector<TaskSet*>, |
| + TaskComparator> TaskPriorityQueue; |
| + |
| + TaskPriorityQueue shared_ready_to_run_tasks_; |
| // Overridden from base::DelegateSimpleThread: |
| virtual void Run() OVERRIDE; |
| @@ -131,27 +120,20 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
| // are pending. |
| bool shutdown_; |
| - // This set contains all pending tasks. |
| - GraphNodeMap pending_tasks_; |
| + ScopedPtrDeque<base::DelegateSimpleThread> workers_; |
| - // Ordered set of tasks that are ready to run. |
| - typedef std::priority_queue<internal::GraphNode*, |
| - std::vector<internal::GraphNode*>, |
| - PriorityComparator> TaskQueue; |
| - TaskQueue ready_to_run_tasks_; |
| + DISALLOW_COPY_AND_ASSIGN(WorkerInner); |
| +}; |
| - // This set contains all currently running tasks. |
| - GraphNodeMap running_tasks_; |
| +class CC_EXPORT DerivedInner : public WorkerInner { |
| + public: |
| + DerivedInner(); |
| +}; |
| - // Completed tasks not yet collected by origin thread. |
| - TaskVector completed_tasks_; |
| +base::LazyInstance<DerivedInner> g_workerpool_inner; |
| - ScopedPtrDeque<base::DelegateSimpleThread> workers_; |
| - DISALLOW_COPY_AND_ASSIGN(Inner); |
| -}; |
| - |
| -WorkerPool::Inner::Inner( |
| +WorkerInner::WorkerInner( |
| size_t num_threads, const std::string& thread_name_prefix) |
| : lock_(), |
| has_ready_to_run_tasks_cv_(&lock_), |
| @@ -175,18 +157,17 @@ WorkerPool::Inner::Inner( |
| } |
| } |
| -WorkerPool::Inner::~Inner() { |
| +WorkerInner::~WorkerInner() { |
| base::AutoLock lock(lock_); |
| DCHECK(shutdown_); |
| - |
| - DCHECK_EQ(0u, pending_tasks_.size()); |
| - DCHECK_EQ(0u, ready_to_run_tasks_.size()); |
| - DCHECK_EQ(0u, running_tasks_.size()); |
| - DCHECK_EQ(0u, completed_tasks_.size()); |
| + // DCHECK_EQ(0u, wp_->pending_tasks_.size()); |
| + DCHECK_EQ(0u, shared_ready_to_run_tasks_.size()); |
| + // DCHECK_EQ(0u, wp_->running_tasks_.size()); |
| + // DCHECK_EQ(0u, wp_->completed_tasks_.size()); |
| } |
| -void WorkerPool::Inner::Shutdown() { |
| +void WorkerInner::Shutdown() { |
| { |
| base::AutoLock lock(lock_); |
| @@ -207,46 +188,66 @@ void WorkerPool::Inner::Shutdown() { |
| } |
| } |
| -void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { |
| +void WorkerInner::SetTaskGraph(TaskGraph* graph, WorkerPool* worker_pool) { |
| // It is OK to call SetTaskGraph() after shutdown if |graph| is empty. |
| DCHECK(graph->empty() || !shutdown_); |
| GraphNodeMap new_pending_tasks; |
| GraphNodeMap new_running_tasks; |
| TaskQueue new_ready_to_run_tasks; |
| + TaskVector temp_completed_tasks_; |
| + GraphNodeMap temp_pending_tasks; |
| new_pending_tasks.swap(*graph); |
| { |
| base::AutoLock lock(lock_); |
| + |
| + |
| + // Create Task Set |
| + if (tasks_.count(worker_pool) == 0) { |
| + |
| + TaskSet* task_set= new TaskSet(); |
| + task_set->completed_tasks_ = temp_completed_tasks_; |
| + task_set->ready_to_run_tasks_ = new_ready_to_run_tasks; |
| + task_set->running_tasks_.swap(new_running_tasks); |
| + task_set->pending_tasks_.swap(temp_pending_tasks); |
| + tasks_.insert(std::pair<const WorkerPool*, TaskSet*>(worker_pool, task_set)); |
| + delete task_set; |
|
reveman
2013/12/09 20:19:18
I don't think you want to delete the task set here
sohanjg
2013/12/10 07:19:37
As mentioned above, there is inheritance issue wit
|
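The delete flagged above frees the TaskSet immediately after its address has been stored in |tasks_|, so every later tasks_[worker_pool] dereference touches freed memory. A minimal sketch of the bug and of the usual fix under this raw-pointer design (assuming the map is meant to own the pointers: delete only when the entry is erased):

    #include <map>
    #include <utility>

    class WorkerPool {};
    struct TaskSet {};

    std::map<const WorkerPool*, TaskSet*> tasks;

    void BrokenInsert(const WorkerPool* pool) {
      TaskSet* task_set = new TaskSet();
      tasks.insert(std::make_pair(pool, task_set));
      delete task_set;  // BUG: tasks[pool] now points at freed memory.
    }

    void OwningInsert(const WorkerPool* pool) {
      // The map keeps the only owning reference; no delete here.
      tasks.insert(std::make_pair(pool, new TaskSet()));
    }

    void Remove(const WorkerPool* pool) {
      std::map<const WorkerPool*, TaskSet*>::iterator it = tasks.find(pool);
      if (it != tasks.end()) {
        delete it->second;  // Free exactly once, when the entry goes away.
        tasks.erase(it);
      }
    }

    int main() {
      WorkerPool pool;
      OwningInsert(&pool);
      Remove(&pool);
      return 0;
    }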
| + |
| + } |
| + |
| // First remove all completed tasks from |new_pending_tasks| and |
| // adjust number of dependencies. |
| - for (TaskVector::iterator it = completed_tasks_.begin(); |
| - it != completed_tasks_.end(); ++it) { |
| - internal::WorkerPoolTask* task = it->get(); |
| - |
| - scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( |
| - task); |
| - if (node) { |
| - for (internal::GraphNode::Vector::const_iterator it = |
| - node->dependents().begin(); |
| - it != node->dependents().end(); ++it) { |
| - internal::GraphNode* dependent_node = *it; |
| - dependent_node->remove_dependency(); |
| + if (!tasks_[worker_pool]->completed_tasks_.empty()) { |
|
reveman
2013/12/09 20:19:18
why is this empty() check now needed?
sohanjg
2013/12/10 07:19:37
This and other size/empty checks are preventive ch
|
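On the empty() question: iterating with begin()/end() over an empty container already does nothing, because begin() == end(), so the guard adds nesting without adding safety. A small self-contained illustration:

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> completed_tasks;  // empty

      int visits = 0;
      // With an empty vector, begin() == end() and the body never runs,
      // so no preceding empty() check is needed for correctness.
      for (std::vector<int>::iterator it = completed_tasks.begin();
           it != completed_tasks.end(); ++it) {
        ++visits;
      }
      assert(visits == 0);
      return 0;
    }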
| + for (TaskVector::iterator it = tasks_[worker_pool]->completed_tasks_.begin(); |
|
reveman
2013/12/09 20:19:18
Please avoid a lookup in tasks_ for every use and
|
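The repeated tasks_[worker_pool] lookups reveman points out can be hoisted into a single local pointer at the top of the locked section, which is cheaper and reads better. A sketch of the pattern with simplified types and a hypothetical function name:

    #include <map>
    #include <vector>

    class WorkerPool {};

    struct TaskSet {
      std::vector<int> completed_tasks;
      std::vector<int> pending_tasks;
    };

    std::map<const WorkerPool*, TaskSet*> tasks;

    void PruneCompleted(const WorkerPool* worker_pool) {
      // Look the TaskSet up once and reuse the local pointer, rather than
      // evaluating tasks_[worker_pool] in front of every member access.
      TaskSet* task_set = tasks[worker_pool];
      for (std::vector<int>::iterator it = task_set->completed_tasks.begin();
           it != task_set->completed_tasks.end(); ++it) {
        // ... use *it to prune the new pending set, as SetTaskGraph() does ...
      }
      task_set->completed_tasks.clear();
    }

    int main() {
      WorkerPool pool;
      TaskSet task_set;
      task_set.completed_tasks.push_back(42);
      tasks[&pool] = &task_set;
      PruneCompleted(&pool);
      return 0;
    }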
| + it != tasks_[worker_pool]->completed_tasks_.end(); ++it) { |
| + internal::WorkerPoolTask* task = it->get(); |
| + scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( |
| + task); |
| + if (node) { |
| + for (internal::GraphNode::Vector::const_iterator it = |
| + node->dependents().begin(); |
| + it != node->dependents().end(); ++it) { |
| + internal::GraphNode* dependent_node = *it; |
| + dependent_node->remove_dependency(); |
| + } |
| } |
| } |
| } |
| // Build new running task set. |
| - for (GraphNodeMap::iterator it = running_tasks_.begin(); |
| - it != running_tasks_.end(); ++it) { |
| - internal::WorkerPoolTask* task = it->first; |
| - // Transfer scheduled task value from |new_pending_tasks| to |
| - // |new_running_tasks| if currently running. Value must be set to |
| - // NULL if |new_pending_tasks| doesn't contain task. This does |
| - // the right thing in both cases. |
| - new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); |
| + if (!tasks_[worker_pool]->running_tasks_.empty()) { |
|
reveman
2013/12/09 20:19:18
why the empty() check?
|
| + for (GraphNodeMap::iterator it = tasks_[worker_pool]->running_tasks_.begin(); |
| + it != tasks_[worker_pool]->running_tasks_.end(); ++it) { |
| + internal::WorkerPoolTask* task = it->first; |
| + // Transfer scheduled task value from |new_pending_tasks| to |
| + // |new_running_tasks| if currently running. Value must be set to |
| + // NULL if |new_pending_tasks| doesn't contain task. This does |
| + // the right thing in both cases. |
| + new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); |
| + } |
| } |
| // Build new "ready to run" tasks queue. |
| @@ -264,82 +265,105 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { |
| // Note: This is only for debugging purposes. |
| task->DidSchedule(); |
| - if (!node->num_dependencies()) |
| + if (!node->num_dependencies()) { |
| new_ready_to_run_tasks.push(node); |
| - |
| + } |
| // Erase the task from old pending tasks. |
| - pending_tasks_.erase(task); |
| - } |
| - |
| - completed_tasks_.reserve(completed_tasks_.size() + pending_tasks_.size()); |
| + if (tasks_[worker_pool]->pending_tasks_.size() > 0) |
|
reveman
2013/12/09 20:19:18
why the size() check?
|
| + tasks_[worker_pool]->pending_tasks_.erase(task); |
| - // The items left in |pending_tasks_| need to be canceled. |
| - for (GraphNodeMap::const_iterator it = pending_tasks_.begin(); |
| - it != pending_tasks_.end(); |
| - ++it) { |
| - completed_tasks_.push_back(it->first); |
| } |
| - // Swap task sets. |
| - // Note: old tasks are intentionally destroyed after releasing |lock_|. |
| - pending_tasks_.swap(new_pending_tasks); |
| - running_tasks_.swap(new_running_tasks); |
| - std::swap(ready_to_run_tasks_, new_ready_to_run_tasks); |
| + if (!tasks_[worker_pool]->pending_tasks_.empty()) |
|
reveman
2013/12/09 20:19:18
and why check empty() here?
|
| + tasks_[worker_pool]->completed_tasks_.reserve(tasks_[worker_pool]->completed_tasks_.size() + tasks_[worker_pool]->pending_tasks_.size()); |
| + |
| + // The items left in |pending_tasks_| need to be canceled. |
| + if (!tasks_[worker_pool]->pending_tasks_.empty()) { |
|
reveman
2013/12/09 20:19:18
and here
|
| + for (GraphNodeMap::const_iterator it = tasks_[worker_pool]->pending_tasks_.begin(); |
| + it != tasks_[worker_pool]->pending_tasks_.end(); |
| + ++it) { |
| + // completed_tasks_.push_back(it->first); |
| + tasks_[worker_pool]->completed_tasks_.push_back(it->first); |
| + // temp_completed_tasks_.push_back(it->first); |
| + } |
| + } |
| + |
| + tasks_[worker_pool]->pending_tasks_.swap(new_pending_tasks); |
| + tasks_[worker_pool]->running_tasks_.swap(new_running_tasks); |
| + std::swap(tasks_[worker_pool]->ready_to_run_tasks_, new_ready_to_run_tasks); |
| + shared_ready_to_run_tasks_.push(tasks_[worker_pool]); |
|
reveman
2013/12/09 20:19:18
what happens to the old TaskSet pointer in shared_
sohanjg
2013/12/10 07:19:37
shared_ready_to_run_tasks_ is a priority queue, s
|
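On the exchange above: std::priority_queue offers no way to update or remove an arbitrary element, so pushing tasks_[worker_pool] on every SetTaskGraph() call accumulates duplicate pointers to the same TaskSet, and the older entries go stale rather than away. A small illustration with a simplified element type:

    #include <cassert>
    #include <queue>
    #include <vector>

    struct TaskSet { int priority; };

    struct TaskComparator {
      bool operator()(const TaskSet* a, const TaskSet* b) const {
        return a->priority > b->priority;
      }
    };

    int main() {
      std::priority_queue<TaskSet*, std::vector<TaskSet*>, TaskComparator> queue;
      TaskSet set;
      set.priority = 1;

      // Each scheduling pass pushes the same pointer again...
      queue.push(&set);
      queue.push(&set);

      // ...so the queue now holds two entries for one TaskSet, and there is
      // no way to drop or refresh the stale one short of popping it.
      assert(queue.size() == 2);
      return 0;
    }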
| // If |ready_to_run_tasks_| is empty, it means we either have |
| // running tasks, or we have no pending tasks. |
| - DCHECK(!ready_to_run_tasks_.empty() || |
| - (pending_tasks_.empty() || !running_tasks_.empty())); |
| + DCHECK(!tasks_[worker_pool]->ready_to_run_tasks_.empty() || |
| + (tasks_[worker_pool]->pending_tasks_.empty() || !tasks_[worker_pool]->running_tasks_.empty())); |
| // If there is more work available, wake up worker thread. |
| - if (!ready_to_run_tasks_.empty()) |
| + if (!tasks_[worker_pool]->ready_to_run_tasks_.empty()) |
| has_ready_to_run_tasks_cv_.Signal(); |
| } |
| } |
| -void WorkerPool::Inner::CollectCompletedTasks(TaskVector* completed_tasks) { |
| +void WorkerInner::CollectCompletedTasks |
| + (TaskVector* completed_tasks, WorkerPool* worker_pool) { |
| base::AutoLock lock(lock_); |
| DCHECK_EQ(0u, completed_tasks->size()); |
| - completed_tasks->swap(completed_tasks_); |
| + if (!shared_ready_to_run_tasks_.empty()) |
|
reveman
2013/12/09 20:19:18
why the empty check?
|
| + completed_tasks->swap(shared_ready_to_run_tasks_.top()->completed_tasks_); |
| + |
| } |
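The swap above hands the caller the completed tasks of whichever TaskSet is currently at the top of |shared_ready_to_run_tasks_|, which is not necessarily the WorkerPool that asked. Assuming the intent is per-pool collection, a sketch of looking the caller up in |tasks_| instead (simplified types, hypothetical names):

    #include <map>
    #include <vector>

    class WorkerPool {};

    struct TaskSet {
      std::vector<int> completed_tasks;  // stand-in for TaskVector
    };

    std::map<const WorkerPool*, TaskSet*> tasks;

    void CollectCompletedTasks(std::vector<int>* completed_tasks,
                               const WorkerPool* worker_pool) {
      std::map<const WorkerPool*, TaskSet*>::iterator it =
          tasks.find(worker_pool);
      if (it == tasks.end())
        return;  // Nothing has ever been scheduled for this pool.
      // Hand the caller exactly its own completed tasks, not whatever sits
      // at the top of the shared priority queue.
      completed_tasks->swap(it->second->completed_tasks);
    }

    int main() {
      WorkerPool pool;
      TaskSet task_set;
      task_set.completed_tasks.push_back(7);
      tasks[&pool] = &task_set;

      std::vector<int> collected;
      CollectCompletedTasks(&collected, &pool);
      return 0;
    }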
| -void WorkerPool::Inner::Run() { |
| +void WorkerInner::Run() { |
| base::AutoLock lock(lock_); |
| - // Get a unique thread index. |
| +// Get a unique thread index. |
| int thread_index = next_thread_index_++; |
| + // bool get_new_taskqueue = true; |
| + TaskSet* ready_to_run_task_set_ = new TaskSet(); |
|
reveman
2013/12/09 20:19:18
why this heap allocation? Also don't use underscor
sohanjg
2013/12/10 07:19:37
Will take care of it.
|
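On the style comment above: Chromium reserves the trailing underscore for member variables, and the local does not need to own a TaskSet at all, so a plain pointer initialized to NULL and later assigned from the shared queue avoids both the naming issue and the leaked allocation. A minimal sketch:

    #include <cstddef>

    struct TaskSet;  // defined earlier in the patch

    void Run() {
      // A local variable: no trailing underscore, and no speculative heap
      // allocation just to have a non-NULL pointer before the first pop.
      TaskSet* current_task_set = NULL;
      // ... inside the loop, assign from the shared queue instead:
      //   current_task_set = shared_ready_to_run_tasks_.top();
      //   shared_ready_to_run_tasks_.pop();
      (void)current_task_set;  // silence unused-variable warnings in this sketch
    }

    int main() {
      Run();
      return 0;
    }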
| while (true) { |
| - if (ready_to_run_tasks_.empty()) { |
| - // Exit when shutdown is set and no more tasks are pending. |
| - if (shutdown_ && pending_tasks_.empty()) |
| - break; |
| - |
| - // Wait for more tasks. |
| - has_ready_to_run_tasks_cv_.Wait(); |
| - continue; |
| + if (shared_ready_to_run_tasks_.empty()) { |
| + // Exit when shutdown is set and no more tasks are pending. |
| + // if (shutdown_ && pending_tasks_.empty()) |
| + // if (shutdown_ && ready_to_run_task_set_->pending_tasks_.empty()) |
| + // break; |
| + // Wait for more tasks. |
| + has_ready_to_run_tasks_cv_.Wait(); |
| + continue; |
| + } |
| + |
| + |
| + // Take top priority TaskSet from |shared_ready_to_run_tasks_|. |
| + if (ready_to_run_task_set_ == NULL || ready_to_run_task_set_->ready_to_run_tasks_.empty()) { |
|
reveman
2013/12/09 20:19:18
I don't understand why you have these checks here.
sohanjg
2013/12/10 07:19:37
This check is to ensure that, ready_to_run_task_se
|
| + ready_to_run_task_set_ = shared_ready_to_run_tasks_.top(); |
| + shared_ready_to_run_tasks_.pop(); |
| } |
| // Take top priority task from |ready_to_run_tasks_|. |
| scoped_refptr<internal::WorkerPoolTask> task( |
| - ready_to_run_tasks_.top()->task()); |
| - ready_to_run_tasks_.pop(); |
| + ready_to_run_task_set_->ready_to_run_tasks_.top()->task()); |
| + ready_to_run_task_set_->ready_to_run_tasks_.pop(); |
|
reveman
2013/12/09 20:19:18
You'll have to insert the TaskSet in the shared qu
sohanjg
2013/12/10 07:19:37
Why would we need to re-insert again? Won't SetTas
|
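On reveman's point about re-inserting: once a worker pops a TaskSet from |shared_ready_to_run_tasks_| it is invisible to the other threads, so if ready tasks remain after one is taken, the set has to be pushed back; otherwise the remaining tasks only become visible again when SetTaskGraph() happens to push the set anew. A simplified, single-threaded sketch of the pop / take-one / conditional re-push pattern:

    #include <cassert>
    #include <queue>

    struct TaskSet {
      std::queue<int> ready_to_run_tasks;  // stand-in for the real TaskQueue
    };

    int main() {
      std::queue<TaskSet*> shared_ready_to_run_tasks;  // stand-in for the PQ

      TaskSet set;
      set.ready_to_run_tasks.push(1);
      set.ready_to_run_tasks.push(2);
      shared_ready_to_run_tasks.push(&set);

      // A worker takes the set and one task from it.
      TaskSet* current = shared_ready_to_run_tasks.front();
      shared_ready_to_run_tasks.pop();
      current->ready_to_run_tasks.pop();  // "run" task 1

      // Re-insert while tasks remain, so other workers can still find them.
      if (!current->ready_to_run_tasks.empty())
        shared_ready_to_run_tasks.push(current);

      assert(shared_ready_to_run_tasks.size() == 1);
      return 0;
    }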
| + |
| // Move task from |pending_tasks_| to |running_tasks_|. |
| - DCHECK(pending_tasks_.contains(task.get())); |
| - DCHECK(!running_tasks_.contains(task.get())); |
| - running_tasks_.set(task.get(), pending_tasks_.take_and_erase(task.get())); |
| + DCHECK(ready_to_run_task_set_->pending_tasks_.contains(task.get())); |
| + DCHECK(!ready_to_run_task_set_->running_tasks_.contains(task.get())); |
| + |
| + ready_to_run_task_set_->running_tasks_.set(task.get(), ready_to_run_task_set_->pending_tasks_.take_and_erase(task.get())); |
| // There may be more work available, so wake up another worker thread. |
| has_ready_to_run_tasks_cv_.Signal(); |
| + // if (ready_to_run_task_set_->ready_to_run_tasks_.empty()) |
| + // get_new_taskqueue = true; |
| + |
| // Call WillRun() before releasing |lock_| and running task. |
| task->WillRun(); |
| { |
| base::AutoUnlock unlock(lock_); |
| - |
| + // VLOG(0)<<__FUNCTION__<<" Sohan - RunOnWorkerThread"; |
| task->RunOnWorkerThread(thread_index); |
| } |
| @@ -348,35 +372,102 @@ void WorkerPool::Inner::Run() { |
| // Now iterate over all dependents to remove dependency and check |
| // if they are ready to run. |
| - scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase( |
| + if (ready_to_run_task_set_->running_tasks_.size() > 0) { |
|
reveman
2013/12/09 20:19:18
why the size check?
|
| + scoped_ptr<internal::GraphNode> node = ready_to_run_task_set_->running_tasks_.take_and_erase( |
| task.get()); |
| - if (node) { |
| - for (internal::GraphNode::Vector::const_iterator it = |
| - node->dependents().begin(); |
| - it != node->dependents().end(); ++it) { |
| - internal::GraphNode* dependent_node = *it; |
| - |
| - dependent_node->remove_dependency(); |
| - // Task is ready if it has no dependencies. Add it to |
| - // |ready_to_run_tasks_|. |
| - if (!dependent_node->num_dependencies()) |
| - ready_to_run_tasks_.push(dependent_node); |
| + if (node) { |
| + for (internal::GraphNode::Vector::const_iterator it = |
| + node->dependents().begin(); |
| + it != node->dependents().end(); ++it) { |
| + internal::GraphNode* dependent_node = *it; |
| + |
| + dependent_node->remove_dependency(); |
| + // Task is ready if it has no dependencies. Add it to |
| + // |ready_to_run_tasks_|. |
| + if (!dependent_node->num_dependencies()) |
| + ready_to_run_task_set_->ready_to_run_tasks_.push(dependent_node); |
| + } |
| } |
| } |
| // Finally add task to |completed_tasks_|. |
| - completed_tasks_.push_back(task); |
| + ready_to_run_task_set_->completed_tasks_.push_back(task); |
| + |
| } |
| + delete ready_to_run_task_set_; |
| // We noticed we should exit. Wake up the next worker so it knows it should |
| // exit as well (because the Shutdown() code only signals once). |
| has_ready_to_run_tasks_cv_.Signal(); |
| } |
| +// Derived WorkerInner Ctor |
| +DerivedInner::DerivedInner(): WorkerInner(cc::switches::GetNumRasterThreads(), "CompositorRaster") { |
| + } |
| +} // namespace anonymous |
| + |
| +namespace internal { |
| + |
| +WorkerPoolTask::WorkerPoolTask() |
| + : did_schedule_(false), |
| + did_run_(false), |
| + did_complete_(false) { |
| +} |
| + |
| +WorkerPoolTask::~WorkerPoolTask() { |
| + DCHECK_EQ(did_schedule_, did_complete_); |
| + DCHECK(!did_run_ || did_schedule_); |
| + DCHECK(!did_run_ || did_complete_); |
| +} |
| + |
| +void WorkerPoolTask::DidSchedule() { |
| + DCHECK(!did_complete_); |
| + did_schedule_ = true; |
| +} |
| + |
| +void WorkerPoolTask::WillRun() { |
| + DCHECK(did_schedule_); |
| + DCHECK(!did_complete_); |
| + DCHECK(!did_run_); |
| +} |
| + |
| +void WorkerPoolTask::DidRun() { |
| + did_run_ = true; |
| +} |
| + |
| +void WorkerPoolTask::WillComplete() { |
| + DCHECK(!did_complete_); |
| +} |
| + |
| +void WorkerPoolTask::DidComplete() { |
| + DCHECK(did_schedule_); |
| + DCHECK(!did_complete_); |
| + did_complete_ = true; |
| +} |
| + |
| +bool WorkerPoolTask::HasFinishedRunning() const { |
| + return did_run_; |
| +} |
| + |
| +bool WorkerPoolTask::HasCompleted() const { |
| + return did_complete_; |
| +} |
| + |
| +GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) |
| + : task_(task), |
| + priority_(priority), |
| + num_dependencies_(0) { |
| +} |
| + |
| +GraphNode::~GraphNode() { |
| +} |
| + |
| +} // namespace internal |
| + |
| + |
| WorkerPool::WorkerPool(size_t num_threads, |
| const std::string& thread_name_prefix) |
| - : in_dispatch_completion_callbacks_(false), |
| - inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) { |
| + : in_dispatch_completion_callbacks_(false) { |
| } |
| WorkerPool::~WorkerPool() { |
| @@ -386,8 +477,7 @@ void WorkerPool::Shutdown() { |
| TRACE_EVENT0("cc", "WorkerPool::Shutdown"); |
| DCHECK(!in_dispatch_completion_callbacks_); |
| - |
| - inner_->Shutdown(); |
| + g_workerpool_inner.Pointer()->Shutdown(); |
| } |
| void WorkerPool::CheckForCompletedTasks() { |
| @@ -396,7 +486,7 @@ void WorkerPool::CheckForCompletedTasks() { |
| DCHECK(!in_dispatch_completion_callbacks_); |
| TaskVector completed_tasks; |
| - inner_->CollectCompletedTasks(&completed_tasks); |
| + g_workerpool_inner.Pointer()->CollectCompletedTasks(&completed_tasks, this); |
| ProcessCompletedTasks(completed_tasks); |
| } |
| @@ -426,8 +516,7 @@ void WorkerPool::SetTaskGraph(TaskGraph* graph) { |
| "num_tasks", graph->size()); |
| DCHECK(!in_dispatch_completion_callbacks_); |
| - |
| - inner_->SetTaskGraph(graph); |
| + g_workerpool_inner.Pointer()->SetTaskGraph(graph, this); |
| } |
| } // namespace cc |
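Taken as a whole, the patch replaces the per-WorkerPool Inner with one process-wide WorkerInner held in a base::LazyInstance, a map of per-pool TaskSets, and a shared priority queue of those sets. A generic sketch of that lazy-singleton sharing pattern, using a plain function-local static as a stand-in for base::LazyInstance (which additionally guarantees thread-safe initialization):

    #include <cassert>

    // One process-wide inner shared by every WorkerPool-like object,
    // created lazily on first use.
    class SharedInner {
     public:
      static SharedInner* Get() {
        // Stand-in for base::LazyInstance<DerivedInner>::Pointer().
        static SharedInner instance;
        return &instance;
      }
      void RegisterPool() { ++num_pools_; }
      int num_pools() const { return num_pools_; }

     private:
      SharedInner() : num_pools_(0) {}
      int num_pools_;
    };

    int main() {
      // Two independent "pools" end up talking to the same shared inner,
      // mirroring how WorkerPool::SetTaskGraph() now routes through
      // g_workerpool_inner.Pointer().
      SharedInner::Get()->RegisterPool();
      SharedInner::Get()->RegisterPool();
      assert(SharedInner::Get()->num_pools() == 2);
      return 0;
    }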