Index: cc/resources/worker_pool.cc |
diff --git a/cc/resources/worker_pool.cc b/cc/resources/worker_pool.cc |
old mode 100644 |
new mode 100755 |
index dca0c704f09dbc2559bf3f4adf860724ce9a6240..d7f8379963045090162ce8e87668b0a28d8fbbdd |
--- a/cc/resources/worker_pool.cc |
+++ b/cc/resources/worker_pool.cc |
@@ -8,94 +8,52 @@ |
#include <queue> |
#include "base/bind.h" |
+#include "base/command_line.h" |
#include "base/containers/hash_tables.h" |
#include "base/debug/trace_event.h" |
+#include "base/lazy_instance.h" |
#include "base/strings/stringprintf.h" |
#include "base/synchronization/condition_variable.h" |
#include "base/threading/simple_thread.h" |
#include "base/threading/thread_restrictions.h" |
#include "cc/base/scoped_ptr_deque.h" |
+#include "cc/base/switches.h" |
namespace cc { |
-namespace internal { |
- |
-WorkerPoolTask::WorkerPoolTask() |
- : did_schedule_(false), |
- did_run_(false), |
- did_complete_(false) { |
-} |
- |
-WorkerPoolTask::~WorkerPoolTask() { |
- DCHECK_EQ(did_schedule_, did_complete_); |
- DCHECK(!did_run_ || did_schedule_); |
- DCHECK(!did_run_ || did_complete_); |
-} |
- |
-void WorkerPoolTask::DidSchedule() { |
- DCHECK(!did_complete_); |
- did_schedule_ = true; |
-} |
- |
-void WorkerPoolTask::WillRun() { |
- DCHECK(did_schedule_); |
- DCHECK(!did_complete_); |
- DCHECK(!did_run_); |
-} |
- |
-void WorkerPoolTask::DidRun() { |
- did_run_ = true; |
-} |
- |
-void WorkerPoolTask::WillComplete() { |
- DCHECK(!did_complete_); |
-} |
- |
-void WorkerPoolTask::DidComplete() { |
- DCHECK(did_schedule_); |
- DCHECK(!did_complete_); |
- did_complete_ = true; |
-} |
- |
-bool WorkerPoolTask::HasFinishedRunning() const { |
- return did_run_; |
-} |
- |
-bool WorkerPoolTask::HasCompleted() const { |
- return did_complete_; |
-} |
- |
-GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) |
- : task_(task), |
- priority_(priority), |
- num_dependencies_(0) { |
-} |
- |
-GraphNode::~GraphNode() { |
-} |
- |
-} // namespace internal |
+namespace { |
// Internal to the worker pool. Any data or logic that needs to be |
// shared between threads lives in this class. All members are guarded |
// by |lock_|. |
-class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
+class WorkerInner : public base::DelegateSimpleThread::Delegate { |
reveman 2013/12/16 17:06:30: We need a better name than "WorkerInner" for this.
sohanjg 2013/12/17 14:58:12: Done.

public: |
- Inner(size_t num_threads, const std::string& thread_name_prefix); |
- virtual ~Inner(); |
+ WorkerInner(size_t num_threads, const std::string& thread_name_prefix); |
+ virtual ~WorkerInner(); |
void Shutdown(); |
+ typedef base::ScopedPtrHashMap<internal::WorkerPoolTask*, internal::GraphNode> |
+ GraphNodeMap; |
+ typedef GraphNodeMap TaskGraph; |
reveman 2013/12/16 17:06:30: How about "typedef WorkerPool::TaskGraph TaskGraph
sohanjg 2013/12/17 14:58:12: Done. I had to pull the TaskGraph and Taskvector f

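A minimal sketch of the aliasing suggested above, assuming worker_pool.h already declares these typedefs on WorkerPool (the header itself is not shown in this diff):

// Reuse the typedefs WorkerPool already exposes rather than redefining
// the underlying container types inside the anonymous namespace.
typedef WorkerPool::TaskGraph TaskGraph;
typedef WorkerPool::TaskVector TaskVector;
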
+ typedef base::ScopedPtrHashMap<WorkerPool*, GraphNodeMap> |
+ TaskMap; |
reveman 2013/12/16 17:06:30: Not a task map. Should it be TaskGraphMap? Also th
sohanjg 2013/12/17 14:58:12: Done. This code was not used

+ typedef std::vector<scoped_refptr<internal::WorkerPoolTask> > TaskVector; |
+ |
// Schedule running of tasks in |graph|. Tasks previously scheduled but |
// no longer needed will be canceled unless already running. Canceled |
// tasks are moved to |completed_tasks_| without being run. The result |
// is that once scheduled, a task is guaranteed to end up in the |
// |completed_tasks_| queue even if they later get canceled by another |
// call to SetTaskGraph(). |
- void SetTaskGraph(TaskGraph* graph); |
+ void SetTaskGraph(TaskGraph* graph, WorkerPool* worker_pool); |
reveman 2013/12/16 17:06:30: "const WorkerPool*" instead and I prefer the worke
sohanjg 2013/12/17 14:58:12: Done.

// Collect all completed tasks in |completed_tasks|. |
- void CollectCompletedTasks(TaskVector* completed_tasks); |
+ void CollectCompletedTasks(TaskVector* completed_tasks, |
+ WorkerPool* worker_pool); |
reveman 2013/12/16 17:06:30: same here.
sohanjg 2013/12/17 14:58:12: Done.

+ |
+ void Register(const WorkerPool* worker_pool); |
+ void Unregister(const WorkerPool* worker_pool); |
reveman 2013/12/16 17:06:30: Please keep these sorted the same way as the imple
sohanjg 2013/12/17 14:58:12: Done.

private: |
class PriorityComparator { |
@@ -111,6 +69,42 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
} |
}; |
+ // Ordered set of tasks that are ready to run. |
+ typedef std::priority_queue<internal::GraphNode*, |
+ std::vector<internal::GraphNode*>, |
+ PriorityComparator> TaskQueue; |
+ |
+ class TaskSet { |
reveman 2013/12/16 17:06:30: TaskNamespace?
sohanjg 2013/12/17 14:58:12: Done.

+ public: |
reveman 2013/12/16 17:06:30: Make this a struct instead and remove "_" suffix f
sohanjg 2013/12/17 14:58:12: Done.

+ GraphNodeMap pending_tasks_; |
+ GraphNodeMap running_tasks_; |
+ TaskVector completed_tasks_; |
+ TaskQueue ready_to_run_tasks_; |
+ }; |
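For illustration, the struct form suggested in the comments above might look like the following; the name TaskNamespace and the dropped "_" suffix come from the review, and the member types reuse the typedefs already defined in this class:

// Per-WorkerPool state. A plain struct with public members, so the "_"
// suffix used for private members is dropped.
struct TaskNamespace {
  GraphNodeMap pending_tasks;
  GraphNodeMap running_tasks;
  TaskVector completed_tasks;
  TaskQueue ready_to_run_tasks;
};
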
+ |
+ class TaskComparator { |
reveman 2013/12/16 17:06:30: This is a priority comparator too. How about renam
sohanjg 2013/12/17 14:58:12: Done.

+ public: |
+ bool operator()(const TaskSet* a, |
+ const TaskSet* b) { |
+ if (a->ready_to_run_tasks_.top()->priority() |
+ != b->ready_to_run_tasks_.top()->priority()) |
+ return a->ready_to_run_tasks_.top()->priority() > |
+ b->ready_to_run_tasks_.top()->priority(); |
+ |
+ return a->ready_to_run_tasks_.top()->dependents().size() > |
+ b->ready_to_run_tasks_.top()->dependents().size(); |
+ } |
reveman 2013/12/16 17:06:30: Could you reuse the logic from the above comparato
sohanjg 2013/12/17 14:58:12: There are some issue with constantness when invoki

+ }; |
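One way to reuse the existing task-level ordering instead of duplicating it, sketched here against the TaskNamespace struct shown earlier; whether this runs into the const-ness issue mentioned in the reply depends on how PriorityComparator::operator() is declared:

// Order namespaces by delegating to the task-level comparator on the
// front task of each namespace's ready-to-run queue.
class NamespacePriorityComparator {
 public:
  bool operator()(const TaskNamespace* a, const TaskNamespace* b) const {
    return PriorityComparator()(a->ready_to_run_tasks.top(),
                                b->ready_to_run_tasks.top());
  }
};
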
+ |
+ typedef std::map<const WorkerPool*, TaskSet*> TaskMapper; |
reveman 2013/12/16 17:06:30: TaskNamespaceMap? Also, please use linked_ptr<Task
sohanjg 2013/12/17 14:58:12: Done.

+ |
+ TaskMapper tasks_; |
reveman 2013/12/16 17:06:30: namespaces_? Also, please move all member variable
sohanjg 2013/12/17 14:58:12: Done.

+ |
+ typedef std::priority_queue<TaskSet*, |
+ std::vector<TaskSet*>, |
+ TaskComparator> TaskPriorityQueue; |
reveman 2013/12/16 17:06:30: NamespaceQueue?
sohanjg 2013/12/17 14:58:12: Done.

+ |
+ TaskPriorityQueue shared_ready_to_run_tasks_; |
reveman 2013/12/16 17:06:30: ready_to_run_namespaces_? Also move this below mem
sohanjg 2013/12/17 14:58:12: Done.

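Taken together, the renames and ownership changes requested above might come out roughly as follows; TaskNamespaceMap, NamespaceQueue, namespaces_ and ready_to_run_namespaces_ are the reviewer's suggested names, and linked_ptr (base/memory/linked_ptr.h) replaces the raw TaskSet* ownership:

// Own each TaskNamespace through linked_ptr so Unregister() needs no
// manual delete; keep the member variables grouped together.
typedef std::map<const WorkerPool*, linked_ptr<TaskNamespace> >
    TaskNamespaceMap;
typedef std::priority_queue<TaskNamespace*,
                            std::vector<TaskNamespace*>,
                            NamespacePriorityComparator> NamespaceQueue;

TaskNamespaceMap namespaces_;
NamespaceQueue ready_to_run_namespaces_;
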
// Overridden from base::DelegateSimpleThread: |
virtual void Run() OVERRIDE; |
@@ -131,27 +125,20 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
// are pending. |
bool shutdown_; |
- // This set contains all pending tasks. |
- GraphNodeMap pending_tasks_; |
- |
- // Ordered set of tasks that are ready to run. |
- typedef std::priority_queue<internal::GraphNode*, |
- std::vector<internal::GraphNode*>, |
- PriorityComparator> TaskQueue; |
- TaskQueue ready_to_run_tasks_; |
+ ScopedPtrDeque<base::DelegateSimpleThread> workers_; |
- // This set contains all currently running tasks. |
- GraphNodeMap running_tasks_; |
+ DISALLOW_COPY_AND_ASSIGN(WorkerInner); |
+}; |
- // Completed tasks not yet collected by origin thread. |
- TaskVector completed_tasks_; |
+class CC_EXPORT DerivedInner : public WorkerInner { |
+ public: |
+ DerivedInner(); |
+}; |
- ScopedPtrDeque<base::DelegateSimpleThread> workers_; |
+base::LazyInstance<DerivedInner> g_workerpool_inner; |
- DISALLOW_COPY_AND_ASSIGN(Inner); |
-}; |
-WorkerPool::Inner::Inner( |
+WorkerInner::WorkerInner( |
size_t num_threads, const std::string& thread_name_prefix) |
: lock_(), |
has_ready_to_run_tasks_cv_(&lock_), |
@@ -175,24 +162,37 @@ WorkerPool::Inner::Inner( |
} |
} |
-WorkerPool::Inner::~Inner() { |
+WorkerInner::~WorkerInner() { |
base::AutoLock lock(lock_); |
DCHECK(shutdown_); |
+ DCHECK_EQ(0u, shared_ready_to_run_tasks_.size()); |
+} |
+ |
+void WorkerInner::Register(const WorkerPool* worker_pool) { |
+ base::AutoLock lock(lock_); |
+ |
+ // Add TaskSet |
reveman 2013/12/16 17:06:30: I don't think this comment adds any valuable infor
sohanjg 2013/12/17 14:58:12: Done.

+ TaskSet* task_set= new TaskSet(); |
reveman 2013/12/16 17:06:30: use linked_ptr here.
sohanjg 2013/12/17 14:58:12: Done.

+ tasks_.insert(std::pair<const WorkerPool*, |
reveman 2013/12/16 17:06:30: Add DCHECK(tasks_.find(worker_pool) == tasks_.end(
sohanjg 2013/12/17 14:58:12: Done.

+ TaskSet*>(worker_pool, task_set)); |
+ |
+} |
+void WorkerInner::Unregister(const WorkerPool* worker_pool) { |
+ base::AutoLock lock(lock_); |
+ |
+ // Remove TaskSet |
reveman 2013/12/16 17:06:30: remove comment
sohanjg 2013/12/17 14:58:12: Done.

+ delete tasks_[worker_pool]; |
reveman 2013/12/16 17:06:30: this goes away when using linked_ptr
sohanjg 2013/12/17 14:58:12: Done.

+ tasks_.erase(worker_pool); |
reveman 2013/12/16 17:06:30: Add DCHECK(tasks_.find(worker_pool) != tasks_.end(
sohanjg 2013/12/17 14:58:12: Done.

reveman 2013/12/16 17:06:30: no need for this blankline
sohanjg 2013/12/17 14:58:12: Done.

- DCHECK_EQ(0u, pending_tasks_.size()); |
- DCHECK_EQ(0u, ready_to_run_tasks_.size()); |
- DCHECK_EQ(0u, running_tasks_.size()); |
- DCHECK_EQ(0u, completed_tasks_.size()); |
reveman 2013/12/16 17:06:30: I think all these DCHECKs should move to the new U

} |
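A sketch of Register()/Unregister() with the linked_ptr ownership and DCHECKs asked for in the comments above, and with the per-namespace emptiness checks moved out of the destructor into Unregister(); names follow the earlier sketches rather than the patch as shown:

void WorkerInner::Register(const WorkerPool* worker_pool) {
  base::AutoLock lock(lock_);
  DCHECK(namespaces_.find(worker_pool) == namespaces_.end());
  namespaces_[worker_pool] = make_linked_ptr(new TaskNamespace);
}

void WorkerInner::Unregister(const WorkerPool* worker_pool) {
  base::AutoLock lock(lock_);
  TaskNamespaceMap::iterator it = namespaces_.find(worker_pool);
  DCHECK(it != namespaces_.end());
  // The per-namespace equivalents of the destructor DCHECKs removed above.
  DCHECK_EQ(0u, it->second->pending_tasks.size());
  DCHECK_EQ(0u, it->second->running_tasks.size());
  DCHECK(it->second->ready_to_run_tasks.empty());
  DCHECK_EQ(0u, it->second->completed_tasks.size());
  namespaces_.erase(it);  // linked_ptr releases the TaskNamespace.
}
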
-void WorkerPool::Inner::Shutdown() { |
+void WorkerInner::Shutdown() { |
{ |
base::AutoLock lock(lock_); |
DCHECK(!shutdown_); |
shutdown_ = true; |
- |
// Wake up a worker so it knows it should exit. This will cause all workers |
// to exit as each will wake up another worker before exiting. |
has_ready_to_run_tasks_cv_.Signal(); |
@@ -207,7 +207,7 @@ void WorkerPool::Inner::Shutdown() { |
} |
} |
-void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { |
+void WorkerInner::SetTaskGraph(TaskGraph* graph, WorkerPool* worker_pool) { |
// It is OK to call SetTaskGraph() after shutdown if |graph| is empty. |
DCHECK(graph->empty() || !shutdown_); |
@@ -220,12 +220,14 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { |
{ |
base::AutoLock lock(lock_); |
+ DCHECK(tasks_.find(worker_pool) != tasks_.end()); |
+ TaskSet* task_set = tasks_[worker_pool]; |
+ |
// First remove all completed tasks from |new_pending_tasks| and |
// adjust number of dependencies. |
- for (TaskVector::iterator it = completed_tasks_.begin(); |
- it != completed_tasks_.end(); ++it) { |
+ for (TaskVector::iterator it = task_set->completed_tasks_.begin(); |
+ it != task_set->completed_tasks_.end(); ++it) { |
internal::WorkerPoolTask* task = it->get(); |
- |
scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( |
task); |
if (node) { |
@@ -239,15 +241,16 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { |
} |
// Build new running task set. |
- for (GraphNodeMap::iterator it = running_tasks_.begin(); |
- it != running_tasks_.end(); ++it) { |
+ for (GraphNodeMap::iterator it = |
+ task_set->running_tasks_.begin(); |
+ it != task_set->running_tasks_.end(); ++it) { |
internal::WorkerPoolTask* task = it->first; |
// Transfer scheduled task value from |new_pending_tasks| to |
// |new_running_tasks| if currently running. Value must be set to |
// NULL if |new_pending_tasks| doesn't contain task. This does |
// the right in both cases. |
new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); |
- } |
+ } |
reveman 2013/12/16 17:06:30: no need to add a space here
sohanjg 2013/12/17 14:58:12: Done.

// Build new "ready to run" tasks queue. |
// TODO(reveman): Create this queue when building the task graph instead. |
@@ -268,78 +271,101 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { |
new_ready_to_run_tasks.push(node); |
// Erase the task from old pending tasks. |
- pending_tasks_.erase(task); |
+ task_set->pending_tasks_.erase(task); |
+ |
} |
- completed_tasks_.reserve(completed_tasks_.size() + pending_tasks_.size()); |
+ task_set->completed_tasks_.reserve( |
+ task_set->completed_tasks_.size() + |
+ task_set->pending_tasks_.size()); |
// The items left in |pending_tasks_| need to be canceled. |
- for (GraphNodeMap::const_iterator it = pending_tasks_.begin(); |
- it != pending_tasks_.end(); |
- ++it) { |
- completed_tasks_.push_back(it->first); |
+ for (GraphNodeMap::const_iterator it = |
+ task_set->pending_tasks_.begin(); |
+ it != task_set->pending_tasks_.end(); |
+ ++it) { |
+ task_set->completed_tasks_.push_back(it->first); |
} |
// Swap task sets. |
// Note: old tasks are intentionally destroyed after releasing |lock_|. |
- pending_tasks_.swap(new_pending_tasks); |
- running_tasks_.swap(new_running_tasks); |
- std::swap(ready_to_run_tasks_, new_ready_to_run_tasks); |
+ task_set->pending_tasks_.swap(new_pending_tasks); |
+ task_set->running_tasks_.swap(new_running_tasks); |
+ std::swap(task_set->ready_to_run_tasks_, new_ready_to_run_tasks); |
+ |
+ // Re-create the shared_ready_to_run_tasks_ with new taskset |
+ while (!shared_ready_to_run_tasks_.empty()) |
+ shared_ready_to_run_tasks_.pop(); |
+ shared_ready_to_run_tasks_.push(task_set); |
reveman 2013/12/16 17:06:30: This will clear the queue and only add back task_s
sohanjg 2013/12/17 14:58:12: There will be only 1 taskset/workerpool, so each t
reveman 2013/12/19 02:09:38: we'll have calls to settaskgraph from multiple thr
sohanjg 2013/12/19 15:06:59: Done. When i check the run-time state of the shar

// If |ready_to_run_tasks_| is empty, it means we either have |
// running tasks, or we have no pending tasks. |
- DCHECK(!ready_to_run_tasks_.empty() || |
- (pending_tasks_.empty() || !running_tasks_.empty())); |
+ DCHECK(!task_set->ready_to_run_tasks_.empty() || |
+ (task_set->pending_tasks_.empty() || |
+ !task_set->running_tasks_.empty())); |
// If there is more work available, wake up worker thread. |
- if (!ready_to_run_tasks_.empty()) |
+ if (!task_set->ready_to_run_tasks_.empty()) |
has_ready_to_run_tasks_cv_.Signal(); |
} |
} |
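The thread above notes that clearing the shared queue and pushing back only |task_set| discards every other registered pool's entry once SetTaskGraph() is called from multiple threads. One way to address that, sketched with the names suggested earlier, is to rebuild the heap from all namespaces that still have runnable work:

// Rebuild the ready-to-run namespace heap so every registered WorkerPool
// with runnable tasks stays represented, not just the last one updated.
while (!ready_to_run_namespaces_.empty())
  ready_to_run_namespaces_.pop();
for (TaskNamespaceMap::iterator it = namespaces_.begin();
     it != namespaces_.end(); ++it) {
  if (!it->second->ready_to_run_tasks.empty())
    ready_to_run_namespaces_.push(it->second.get());
}
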
-void WorkerPool::Inner::CollectCompletedTasks(TaskVector* completed_tasks) { |
+void WorkerInner::CollectCompletedTasks |
+ (TaskVector* completed_tasks, WorkerPool* worker_pool) { |
base::AutoLock lock(lock_); |
DCHECK_EQ(0u, completed_tasks->size()); |
- completed_tasks->swap(completed_tasks_); |
+ if (!shared_ready_to_run_tasks_.empty()) |
+ completed_tasks->swap(shared_ready_to_run_tasks_.top()->completed_tasks_); |
+ |
} |
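As written, CollectCompletedTasks() swaps with whichever namespace happens to sit on top of the shared queue and never uses |worker_pool|, so one pool could collect another pool's tasks. A per-pool lookup might look like this (const parameter per the earlier comment, names from the sketches above):

void WorkerInner::CollectCompletedTasks(TaskVector* completed_tasks,
                                        const WorkerPool* worker_pool) {
  base::AutoLock lock(lock_);
  DCHECK_EQ(0u, completed_tasks->size());
  TaskNamespaceMap::iterator it = namespaces_.find(worker_pool);
  DCHECK(it != namespaces_.end());
  // Hand back only the completed tasks that belong to this WorkerPool.
  completed_tasks->swap(it->second->completed_tasks);
}
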
-void WorkerPool::Inner::Run() { |
+void WorkerInner::Run() { |
base::AutoLock lock(lock_); |
// Get a unique thread index. |
int thread_index = next_thread_index_++; |
+ TaskSet* ready_to_run_task_set = NULL; |
while (true) { |
- if (ready_to_run_tasks_.empty()) { |
- // Exit when shutdown is set and no more tasks are pending. |
- if (shutdown_ && pending_tasks_.empty()) |
- break; |
- |
- // Wait for more tasks. |
- has_ready_to_run_tasks_cv_.Wait(); |
- continue; |
+ |
+ if (shared_ready_to_run_tasks_.empty()) { |
+ // Exit when shutdown is set and no more tasks are pending. |
+ if (shutdown_ && ready_to_run_task_set && |
+ ready_to_run_task_set->pending_tasks_.empty()) { |
+ break; |
+ } |
+ // Wait for more tasks. |
+ has_ready_to_run_tasks_cv_.Wait(); |
+ continue; |
} |
+ // Take top priority TaskSet from |shared_ready_to_run_tasks_|. |
+ ready_to_run_task_set = shared_ready_to_run_tasks_.top(); |
+ |
// Take top priority task from |ready_to_run_tasks_|. |
scoped_refptr<internal::WorkerPoolTask> task( |
- ready_to_run_tasks_.top()->task()); |
- ready_to_run_tasks_.pop(); |
+ ready_to_run_task_set->ready_to_run_tasks_.top()->task()); |
+ ready_to_run_task_set->ready_to_run_tasks_.pop(); |
+ |
// Move task from |pending_tasks_| to |running_tasks_|. |
- DCHECK(pending_tasks_.contains(task.get())); |
- DCHECK(!running_tasks_.contains(task.get())); |
- running_tasks_.set(task.get(), pending_tasks_.take_and_erase(task.get())); |
+ DCHECK(ready_to_run_task_set->pending_tasks_.contains(task.get())); |
+ DCHECK(!ready_to_run_task_set->running_tasks_.contains(task.get())); |
+ |
+ ready_to_run_task_set->running_tasks_.set( |
+ task.get(), ready_to_run_task_set->pending_tasks_.take_and_erase |
+ (task.get())); |
// There may be more work available, so wake up another worker thread. |
has_ready_to_run_tasks_cv_.Signal(); |
+ |
// Call WillRun() before releasing |lock_| and running task. |
task->WillRun(); |
{ |
base::AutoUnlock unlock(lock_); |
- |
task->RunOnWorkerThread(thread_index); |
} |
@@ -348,24 +374,30 @@ void WorkerPool::Inner::Run() { |
// Now iterate over all dependents to remove dependency and check |
// if they are ready to run. |
- scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase( |
+ scoped_ptr<internal::GraphNode> node = |
+ ready_to_run_task_set->running_tasks_.take_and_erase( |
task.get()); |
- if (node) { |
- for (internal::GraphNode::Vector::const_iterator it = |
- node->dependents().begin(); |
- it != node->dependents().end(); ++it) { |
- internal::GraphNode* dependent_node = *it; |
- |
- dependent_node->remove_dependency(); |
- // Task is ready if it has no dependencies. Add it to |
- // |ready_to_run_tasks_|. |
- if (!dependent_node->num_dependencies()) |
- ready_to_run_tasks_.push(dependent_node); |
+ if (node) { |
+ for (internal::GraphNode::Vector::const_iterator it = |
+ node->dependents().begin(); |
+ it != node->dependents().end(); ++it) { |
+ internal::GraphNode* dependent_node = *it; |
+ |
+ dependent_node->remove_dependency(); |
+ // Task is ready if it has no dependencies. Add it to |
+ // |ready_to_run_tasks_|. |
+ if (!dependent_node->num_dependencies()) |
+ ready_to_run_task_set->ready_to_run_tasks_.push(dependent_node); |
+ } |
} |
- } |
// Finally add task to |completed_tasks_|. |
- completed_tasks_.push_back(task); |
+ ready_to_run_task_set->completed_tasks_.push_back(task); |
+ |
+ // Pop when ready_to_run_tasks_ is empty |
+ if (ready_to_run_task_set->ready_to_run_tasks_.empty()) |
+ shared_ready_to_run_tasks_.pop(); |
+ |
} |
// We noticed we should exit. Wake up the next worker so it knows it should |
@@ -373,21 +405,87 @@ void WorkerPool::Inner::Run() { |
has_ready_to_run_tasks_cv_.Signal(); |
} |
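A common pattern for keeping the namespace heap consistent inside Run(), sketched below under the naming assumptions above, is to pop the namespace before taking its front task and re-insert it only if runnable work remains, so its heap position tracks the priority of its new front task rather than waiting until the queue is empty:

// With |lock_| held: take the highest priority namespace off the heap.
TaskNamespace* task_namespace = ready_to_run_namespaces_.top();
ready_to_run_namespaces_.pop();

// Take its front task, run it with |lock_| released, then push any
// dependents that became runnable onto task_namespace->ready_to_run_tasks.

// Re-insert the namespace only if it still has runnable tasks; its heap
// position then reflects its new front task's priority.
if (!task_namespace->ready_to_run_tasks.empty())
  ready_to_run_namespaces_.push(task_namespace);
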
+// Derived WorkerInner Ctor |
+DerivedInner::DerivedInner(): WorkerInner |
+ (switches::GetNumRasterThreads(), "CompositorRaster") { |
+} |
+ |
+} // namespace |
+ |
+namespace internal { |
+ |
+WorkerPoolTask::WorkerPoolTask() |
+ : did_schedule_(false), |
+ did_run_(false), |
+ did_complete_(false) { |
+} |
+ |
+WorkerPoolTask::~WorkerPoolTask() { |
+ DCHECK_EQ(did_schedule_, did_complete_); |
+ DCHECK(!did_run_ || did_schedule_); |
+ DCHECK(!did_run_ || did_complete_); |
+} |
+ |
+void WorkerPoolTask::DidSchedule() { |
+ DCHECK(!did_complete_); |
+ did_schedule_ = true; |
+} |
+ |
+void WorkerPoolTask::WillRun() { |
+ DCHECK(did_schedule_); |
+ DCHECK(!did_complete_); |
+ DCHECK(!did_run_); |
+} |
+ |
+void WorkerPoolTask::DidRun() { |
+ did_run_ = true; |
+} |
+ |
+void WorkerPoolTask::WillComplete() { |
+ DCHECK(!did_complete_); |
+} |
+ |
+void WorkerPoolTask::DidComplete() { |
+ DCHECK(did_schedule_); |
+ DCHECK(!did_complete_); |
+ did_complete_ = true; |
+} |
+ |
+bool WorkerPoolTask::HasFinishedRunning() const { |
+ return did_run_; |
+} |
+ |
+bool WorkerPoolTask::HasCompleted() const { |
+ return did_complete_; |
+} |
+ |
+GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) |
+ : task_(task), |
+ priority_(priority), |
+ num_dependencies_(0) { |
+} |
+ |
+GraphNode::~GraphNode() { |
+} |
+ |
+} // namespace internal |
+ |
+ |
WorkerPool::WorkerPool(size_t num_threads, |
const std::string& thread_name_prefix) |
- : in_dispatch_completion_callbacks_(false), |
- inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) { |
+ : in_dispatch_completion_callbacks_(false) { |
+ g_workerpool_inner.Pointer()->Register(this); |
} |
WorkerPool::~WorkerPool() { |
+ g_workerpool_inner.Pointer()->Unregister(this); |
} |
void WorkerPool::Shutdown() { |
TRACE_EVENT0("cc", "WorkerPool::Shutdown"); |
DCHECK(!in_dispatch_completion_callbacks_); |
- |
- inner_->Shutdown(); |
+ g_workerpool_inner.Pointer()->Shutdown(); |
} |
void WorkerPool::CheckForCompletedTasks() { |
@@ -396,7 +494,7 @@ void WorkerPool::CheckForCompletedTasks() { |
DCHECK(!in_dispatch_completion_callbacks_); |
TaskVector completed_tasks; |
- inner_->CollectCompletedTasks(&completed_tasks); |
+ g_workerpool_inner.Pointer()->CollectCompletedTasks(&completed_tasks, this); |
ProcessCompletedTasks(completed_tasks); |
} |
@@ -426,8 +524,7 @@ void WorkerPool::SetTaskGraph(TaskGraph* graph) { |
"num_tasks", graph->size()); |
DCHECK(!in_dispatch_completion_callbacks_); |
- |
- inner_->SetTaskGraph(graph); |
+ g_workerpool_inner.Pointer()->SetTaskGraph(graph, this); |
} |
} // namespace cc |