OLD | NEW |
---|---|
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/resources/worker_pool.h" | 5 #include "cc/resources/worker_pool.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <queue> | 8 #include <queue> |
9 | 9 |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
11 #include "base/containers/hash_tables.h" | 11 #include "base/containers/hash_tables.h" |
12 #include "base/debug/trace_event.h" | 12 #include "base/debug/trace_event.h" |
13 #include "base/lazy_instance.h" | |
14 #include "base/memory/linked_ptr.h" | |
13 #include "base/strings/stringprintf.h" | 15 #include "base/strings/stringprintf.h" |
14 #include "base/synchronization/condition_variable.h" | 16 #include "base/synchronization/condition_variable.h" |
15 #include "base/threading/simple_thread.h" | 17 #include "base/threading/simple_thread.h" |
16 #include "base/threading/thread_restrictions.h" | 18 #include "base/threading/thread_restrictions.h" |
17 #include "cc/base/scoped_ptr_deque.h" | 19 #include "cc/base/scoped_ptr_deque.h" |
20 #include "cc/base/switches.h" | |
18 | 21 |
19 namespace cc { | 22 namespace cc { |
20 | 23 |
21 namespace internal { | 24 namespace { |
22 | 25 |
23 WorkerPoolTask::WorkerPoolTask() | 26 // TaskGraphRunners can process task graphs from multiple |
24 : did_schedule_(false), | 27 // WorkerPool instances. All members are guarded by |lock_|. |
25 did_run_(false), | 28 class TaskGraphRunner : public base::DelegateSimpleThread::Delegate { |
26 did_complete_(false) { | 29 public: |
27 } | 30 typedef WorkerPool::TaskGraph TaskGraph; |
31 typedef WorkerPool::TaskVector TaskVector; | |
28 | 32 |
29 WorkerPoolTask::~WorkerPoolTask() { | 33 TaskGraphRunner(size_t num_threads, const std::string& thread_name_prefix); |
30 DCHECK_EQ(did_schedule_, did_complete_); | 34 virtual ~TaskGraphRunner(); |
31 DCHECK(!did_run_ || did_schedule_); | |
32 DCHECK(!did_run_ || did_complete_); | |
33 } | |
34 | 35 |
35 void WorkerPoolTask::DidSchedule() { | 36 void Register(const WorkerPool* worker_pool); |
36 DCHECK(!did_complete_); | 37 void Unregister(const WorkerPool* worker_pool); |
37 did_schedule_ = true; | |
38 } | |
39 | |
40 void WorkerPoolTask::WillRun() { | |
41 DCHECK(did_schedule_); | |
42 DCHECK(!did_complete_); | |
43 DCHECK(!did_run_); | |
44 } | |
45 | |
46 void WorkerPoolTask::DidRun() { | |
47 did_run_ = true; | |
48 } | |
49 | |
50 void WorkerPoolTask::WillComplete() { | |
51 DCHECK(!did_complete_); | |
52 } | |
53 | |
54 void WorkerPoolTask::DidComplete() { | |
55 DCHECK(did_schedule_); | |
56 DCHECK(!did_complete_); | |
57 did_complete_ = true; | |
58 } | |
59 | |
60 bool WorkerPoolTask::HasFinishedRunning() const { | |
61 return did_run_; | |
62 } | |
63 | |
64 bool WorkerPoolTask::HasCompleted() const { | |
65 return did_complete_; | |
66 } | |
67 | |
68 GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) | |
69 : task_(task), | |
70 priority_(priority), | |
71 num_dependencies_(0) { | |
72 } | |
73 | |
74 GraphNode::~GraphNode() { | |
75 } | |
76 | |
77 } // namespace internal | |
78 | |
79 // Internal to the worker pool. Any data or logic that needs to be | |
80 // shared between threads lives in this class. All members are guarded | |
81 // by |lock_|. | |
82 class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { | |
83 public: | |
84 Inner(size_t num_threads, const std::string& thread_name_prefix); | |
85 virtual ~Inner(); | |
86 | |
87 void Shutdown(); | |
88 | 38 |
89 // Schedule running of tasks in |graph|. Tasks previously scheduled but | 39 // Schedule running of tasks in |graph|. Tasks previously scheduled but |
90 // no longer needed will be canceled unless already running. Canceled | 40 // no longer needed will be canceled unless already running. Canceled |
91 // tasks are moved to |completed_tasks_| without being run. The result | 41 // tasks are moved to |completed_tasks| without being run. The result |
92 // is that once scheduled, a task is guaranteed to end up in the | 42 // is that once scheduled, a task is guaranteed to end up in the |
93 // |completed_tasks_| queue even if they later get canceled by another | 43 // |completed_tasks| queue even if it later gets canceled by another |
94 // call to SetTaskGraph(). | 44 // call to SetTaskGraph(). |
95 void SetTaskGraph(TaskGraph* graph); | 45 void SetTaskGraph(const WorkerPool* worker_pool, TaskGraph* graph); |
46 | |
47 // Wait for all scheduled tasks to finish running | |
reveman
2013/12/31 01:16:52
nit: end line with "."
sohanjg
2013/12/31 06:31:25
Done.
| |
48 void WaitForTasksToFinishRunning(const WorkerPool* worker_pool); | |
96 | 49 |
97 // Collect all completed tasks in |completed_tasks|. | 50 // Collect all completed tasks in |completed_tasks|. |
98 void CollectCompletedTasks(TaskVector* completed_tasks); | 51 void CollectCompletedTasks(const WorkerPool* worker_pool, |
52 TaskVector* completed_tasks); | |
99 | 53 |
100 private: | 54 private: |
101 class PriorityComparator { | 55 class TaskPriorityComparator { |
102 public: | 56 public: |
103 bool operator()(const internal::GraphNode* a, | 57 bool operator()(internal::GraphNode* a, |
vmpstr
2013/12/30 21:13:01
Why non-const?
sohanjg
2013/12/31 06:31:25
Had to drop the const to avoid code duplication, o
vmpstr
2014/01/02 05:38:55
I'm not sure I understand. priority_queue::top() i
| |
104 const internal::GraphNode* b) { | 58 internal::GraphNode* b) { |
105 // In this system, numerically lower priority is run first. | 59 // In this system, numerically lower priority is run first. |
106 if (a->priority() != b->priority()) | 60 if (a->priority() != b->priority()) |
107 return a->priority() > b->priority(); | 61 return a->priority() > b->priority(); |
108 | 62 |
109 // Run task with most dependents first when priority is the same. | 63 // Run task with most dependents first when priority is the same. |
110 return a->dependents().size() < b->dependents().size(); | 64 return a->dependents().size() < b->dependents().size(); |
111 } | 65 } |
112 }; | 66 }; |
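A minimal stand-alone sketch of how this inverted comparator behaves inside std::priority_queue (FakeNode and FakeComparator are illustrative stand-ins, not types from this patch). priority_queue keeps the comparator's "largest" element at top(), so returning a->priority() > b->priority() makes the numerically lowest priority value pop first:

  #include <cstddef>
  #include <queue>
  #include <vector>

  struct FakeNode {
    unsigned priority;      // Lower value runs earlier.
    size_t num_dependents;  // Tie-breaker: more dependents run earlier.
  };

  struct FakeComparator {
    bool operator()(const FakeNode* a, const FakeNode* b) const {
      if (a->priority != b->priority)
        return a->priority > b->priority;
      return a->num_dependents < b->num_dependents;
    }
  };

  int main() {
    FakeNode raster = {1u, 4u};
    FakeNode upload = {2u, 0u};
    std::priority_queue<FakeNode*, std::vector<FakeNode*>, FakeComparator> q;
    q.push(&upload);
    q.push(&raster);
    // top() is |raster|: priority 1 beats priority 2.
    return q.top() == &raster ? 0 : 1;
  }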
113 | 67 |
68 typedef std::priority_queue<internal::GraphNode*, | |
69 std::vector<internal::GraphNode*>, | |
70 TaskPriorityComparator> TaskQueue; | |
71 | |
72 struct TaskNamespace { | |
73 // This set contains all pending tasks. | |
74 TaskGraph pending_tasks; | |
75 // This set contains all currently running tasks. | |
76 TaskGraph running_tasks; | |
77 // Completed tasks not yet collected by origin thread. | |
78 TaskVector completed_tasks; | |
79 // Ordered set of tasks that are ready to run. | |
80 TaskQueue ready_to_run_tasks; | |
81 }; | |
82 | |
83 class TaskNamespacePriorityComparator { | |
84 public: | |
85 bool operator()(TaskNamespace* a, | |
vmpstr
2013/12/30 21:13:01
If we define GraphNode::operator<() instead of Tas
sohanjg
2013/12/31 06:31:25
As you pointed out, doing it with "operator <()" w
reveman
2014/01/02 03:50:08
Please rebase this onto:
https://codereview.chrom
| |
86 TaskNamespace* b) { | |
87 return task_comparators_.operator()(a->ready_to_run_tasks.top(), | |
vmpstr
2013/12/30 21:13:01
DCHECK that the queues aren't empty, please. (I kn
sohanjg
2013/12/31 06:31:25
Should we do it without holding the lock here ?
reveman
2014/01/02 03:50:08
No need to worry about the lock here. That's the c
| |
88 b->ready_to_run_tasks.top()); | |
89 } | |
90 TaskPriorityComparator task_comparators_; | |
vmpstr
2013/12/30 21:13:01
nit: task_comparator_?
sohanjg
2013/12/31 06:31:25
Done.
| |
91 }; | |
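A sketch of the comparator with the non-empty DCHECKs vmpstr asks for above, folding in the task_comparator_ rename (illustrative only; the version that eventually lands may differ):

  class TaskNamespacePriorityComparator {
   public:
    bool operator()(TaskNamespace* a, TaskNamespace* b) {
      DCHECK(!a->ready_to_run_tasks.empty());
      DCHECK(!b->ready_to_run_tasks.empty());
      return task_comparator_(a->ready_to_run_tasks.top(),
                              b->ready_to_run_tasks.top());
    }
    TaskPriorityComparator task_comparator_;
  };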
92 | |
93 typedef std::map<const WorkerPool*, linked_ptr<TaskNamespace> > | |
94 TaskNamespaceMap; | |
95 | |
96 typedef std::priority_queue<TaskNamespace*, | |
97 std::vector<TaskNamespace*>, | |
98 TaskNamespacePriorityComparator> | |
99 TaskNamespaceQueue; | |
100 | |
114 // Overridden from base::DelegateSimpleThread: | 101 // Overridden from base::DelegateSimpleThread: |
115 virtual void Run() OVERRIDE; | 102 virtual void Run() OVERRIDE; |
116 | 103 |
104 inline bool has_finished_running_tasks(TaskNamespace* task_namespace) { | |
105 return (task_namespace->pending_tasks.empty() && | |
106 task_namespace->running_tasks.empty()); | |
107 } | |
108 | |
117 // This lock protects all members of this class except | 109 // This lock protects all members of this class except |
118 // |worker_pool_on_origin_thread_|. Do not read or modify anything | 110 // |worker_pool_on_origin_thread_|. Do not read or modify anything |
119 // without holding this lock. Do not block while holding this lock. | 111 // without holding this lock. Do not block while holding this lock. |
120 mutable base::Lock lock_; | 112 mutable base::Lock lock_; |
121 | 113 |
122 // Condition variable that is waited on by worker threads until new | 114 // Condition variable that is waited on by worker threads until new |
123 // tasks are ready to run or shutdown starts. | 115 // tasks are ready to run or shutdown starts. |
124 base::ConditionVariable has_ready_to_run_tasks_cv_; | 116 base::ConditionVariable has_ready_to_run_tasks_cv_; |
125 | 117 |
118 // Condition variable that is waited on by origin threads until a | |
119 // namespace has finished running all associated tasks. | |
120 base::ConditionVariable has_namespaces_with_finished_running_tasks_cv_; | |
121 | |
126 // Provides each running thread loop with a unique index. First thread | 122 // Provides each running thread loop with a unique index. First thread |
127 // loop index is 0. | 123 // loop index is 0. |
128 unsigned next_thread_index_; | 124 unsigned next_thread_index_; |
129 | 125 |
130 // Set during shutdown. Tells workers to exit when no more tasks | 126 // Set during shutdown. Tells workers to exit when no more tasks |
131 // are pending. | 127 // are pending. |
132 bool shutdown_; | 128 bool shutdown_; |
133 | 129 |
134 // This set contains all pending tasks. | 130 // This set contains all registered namespaces. |
135 GraphNodeMap pending_tasks_; | 131 TaskNamespaceMap namespaces_; |
136 | 132 |
137 // Ordered set of tasks that are ready to run. | 133 // Ordered set of tasks namespaces that have ready to run tasks. |
reveman
2013/12/31 01:16:52
nit: s/set of tasks namespaces/set of task namespa
sohanjg
2013/12/31 06:31:25
Done.
| |
138 typedef std::priority_queue<internal::GraphNode*, | 134 TaskNamespaceQueue ready_to_run_namespaces_; |
139 std::vector<internal::GraphNode*>, | |
140 PriorityComparator> TaskQueue; | |
141 TaskQueue ready_to_run_tasks_; | |
142 | |
143 // This set contains all currently running tasks. | |
144 GraphNodeMap running_tasks_; | |
145 | |
146 // Completed tasks not yet collected by origin thread. | |
147 TaskVector completed_tasks_; | |
148 | 135 |
149 ScopedPtrDeque<base::DelegateSimpleThread> workers_; | 136 ScopedPtrDeque<base::DelegateSimpleThread> workers_; |
150 | 137 |
151 DISALLOW_COPY_AND_ASSIGN(Inner); | 138 DISALLOW_COPY_AND_ASSIGN(TaskGraphRunner); |
152 }; | 139 }; |
153 | 140 |
154 WorkerPool::Inner::Inner( | 141 TaskGraphRunner::TaskGraphRunner( |
155 size_t num_threads, const std::string& thread_name_prefix) | 142 size_t num_threads, const std::string& thread_name_prefix) |
156 : lock_(), | 143 : lock_(), |
157 has_ready_to_run_tasks_cv_(&lock_), | 144 has_ready_to_run_tasks_cv_(&lock_), |
145 has_namespaces_with_finished_running_tasks_cv_(&lock_), | |
158 next_thread_index_(0), | 146 next_thread_index_(0), |
159 shutdown_(false) { | 147 shutdown_(false) { |
160 base::AutoLock lock(lock_); | 148 base::AutoLock lock(lock_); |
161 | 149 |
162 while (workers_.size() < num_threads) { | 150 while (workers_.size() < num_threads) { |
163 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( | 151 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( |
164 new base::DelegateSimpleThread( | 152 new base::DelegateSimpleThread( |
165 this, | 153 this, |
166 thread_name_prefix + | 154 thread_name_prefix + |
167 base::StringPrintf( | 155 base::StringPrintf( |
168 "Worker%u", | 156 "Worker%u", |
169 static_cast<unsigned>(workers_.size() + 1)).c_str())); | 157 static_cast<unsigned>(workers_.size() + 1)).c_str())); |
170 worker->Start(); | 158 worker->Start(); |
171 #if defined(OS_ANDROID) || defined(OS_LINUX) | 159 #if defined(OS_ANDROID) || defined(OS_LINUX) |
172 worker->SetThreadPriority(base::kThreadPriority_Background); | 160 worker->SetThreadPriority(base::kThreadPriority_Background); |
173 #endif | 161 #endif |
174 workers_.push_back(worker.Pass()); | 162 workers_.push_back(worker.Pass()); |
175 } | 163 } |
176 } | 164 } |
177 | 165 |
178 WorkerPool::Inner::~Inner() { | 166 TaskGraphRunner::~TaskGraphRunner() { |
179 base::AutoLock lock(lock_); | |
180 | |
181 DCHECK(shutdown_); | |
182 | |
183 DCHECK_EQ(0u, pending_tasks_.size()); | |
184 DCHECK_EQ(0u, ready_to_run_tasks_.size()); | |
185 DCHECK_EQ(0u, running_tasks_.size()); | |
186 DCHECK_EQ(0u, completed_tasks_.size()); | |
187 } | |
188 | |
189 void WorkerPool::Inner::Shutdown() { | |
190 { | 167 { |
191 base::AutoLock lock(lock_); | 168 base::AutoLock lock(lock_); |
192 | 169 |
170 DCHECK_EQ(0u, ready_to_run_namespaces_.size()); | |
171 DCHECK_EQ(0u, namespaces_.size()); | |
172 | |
193 DCHECK(!shutdown_); | 173 DCHECK(!shutdown_); |
194 shutdown_ = true; | 174 shutdown_ = true; |
195 | 175 |
196 // Wake up a worker so it knows it should exit. This will cause all workers | 176 // Wake up a worker so it knows it should exit. This will cause all workers |
197 // to exit as each will wake up another worker before exiting. | 177 // to exit as each will wake up another worker before exiting. |
198 has_ready_to_run_tasks_cv_.Signal(); | 178 has_ready_to_run_tasks_cv_.Signal(); |
199 } | 179 } |
200 | 180 |
201 while (workers_.size()) { | 181 while (workers_.size()) { |
202 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front(); | 182 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front(); |
203 // http://crbug.com/240453 - Join() is considered IO and will block this | 183 // http://crbug.com/240453 - Join() is considered IO and will block this |
204 // thread. See also http://crbug.com/239423 for further ideas. | 184 // thread. See also http://crbug.com/239423 for further ideas. |
205 base::ThreadRestrictions::ScopedAllowIO allow_io; | 185 base::ThreadRestrictions::ScopedAllowIO allow_io; |
206 worker->Join(); | 186 worker->Join(); |
207 } | 187 } |
208 } | 188 } |
209 | 189 |
210 void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { | 190 void TaskGraphRunner::Register(const WorkerPool* worker_pool) { |
211 // It is OK to call SetTaskGraph() after shutdown if |graph| is empty. | 191 base::AutoLock lock(lock_); |
212 DCHECK(graph->empty() || !shutdown_); | |
213 | 192 |
214 GraphNodeMap new_pending_tasks; | 193 DCHECK(namespaces_.find(worker_pool) == namespaces_.end()); |
215 GraphNodeMap new_running_tasks; | 194 linked_ptr<TaskNamespace> task_set = make_linked_ptr(new TaskNamespace()); |
195 namespaces_[worker_pool] = task_set; | |
196 } | |
197 | |
198 void TaskGraphRunner::Unregister(const WorkerPool* worker_pool) { | |
199 base::AutoLock lock(lock_); | |
200 | |
201 DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); | |
202 | |
203 TaskNamespace* task_namespace = namespaces_[worker_pool].get(); | |
vmpstr
2013/12/30 21:13:01
Save the iterator from find here, so that you can
reveman
2013/12/31 01:16:52
Alternatively, use "namespaces_[worker_pool]" dire
sohanjg
2013/12/31 06:31:25
Done.
sohanjg
2013/12/31 06:31:25
Think Vlad was referring to namespaces_.find(worke
reveman
2014/01/02 03:50:08
Multiple lookups in DCHECKs are fine but we should
| |
204 DCHECK_EQ(0u, task_namespace->pending_tasks.size()); | |
205 DCHECK_EQ(0u, task_namespace->ready_to_run_tasks.size()); | |
206 DCHECK_EQ(0u, task_namespace->running_tasks.size()); | |
207 DCHECK_EQ(0u, task_namespace->completed_tasks.size()); | |
208 | |
209 namespaces_.erase(worker_pool); | |
210 } | |
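A sketch of the single-lookup form the reviewers ask for above (illustrative; the rebased version is not shown here). Holding on to the iterator returned by find() avoids the extra map lookups in the DCHECK and in erase():

  void TaskGraphRunner::Unregister(const WorkerPool* worker_pool) {
    base::AutoLock lock(lock_);

    TaskNamespaceMap::iterator it = namespaces_.find(worker_pool);
    DCHECK(it != namespaces_.end());
    TaskNamespace* task_namespace = it->second.get();

    DCHECK_EQ(0u, task_namespace->pending_tasks.size());
    DCHECK_EQ(0u, task_namespace->ready_to_run_tasks.size());
    DCHECK_EQ(0u, task_namespace->running_tasks.size());
    DCHECK_EQ(0u, task_namespace->completed_tasks.size());

    namespaces_.erase(it);  // Reuses the iterator; no second lookup.
  }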
211 | |
212 void TaskGraphRunner::WaitForTasksToFinishRunning( | |
213 const WorkerPool* worker_pool) { | |
214 base::AutoLock lock(lock_); | |
215 | |
216 DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); | |
217 TaskNamespace* task_namespace = namespaces_[worker_pool].get(); | |
218 | |
219 while (true) { | |
vmpstr
2013/12/30 21:13:01
I think this is equivalent to the following:
whil
sohanjg
2013/12/31 06:31:25
Done.
| |
220 if (has_finished_running_tasks(task_namespace)) { | |
221 // There may be other namespaces that have finished running | |
222 // tasks, so wake up another origin thread. | |
223 has_namespaces_with_finished_running_tasks_cv_.Signal(); | |
224 return; | |
225 } | |
226 has_namespaces_with_finished_running_tasks_cv_.Wait(); | |
227 } | |
228 } | |
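One plausible reading of vmpstr's truncated suggestion above (a sketch, not necessarily his exact wording): the loop collapses into the usual condition-variable wait pattern, signalling once on the way out so another waiting origin thread gets a chance to re-check its own namespace.

  while (!has_finished_running_tasks(task_namespace))
    has_namespaces_with_finished_running_tasks_cv_.Wait();

  // There may be other namespaces that have finished running tasks,
  // so wake up another origin thread before returning.
  has_namespaces_with_finished_running_tasks_cv_.Signal();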
229 | |
230 void TaskGraphRunner::SetTaskGraph(const WorkerPool* worker_pool, | |
231 TaskGraph* graph) { | |
232 TaskGraph new_pending_tasks; | |
233 TaskGraph new_running_tasks; | |
216 TaskQueue new_ready_to_run_tasks; | 234 TaskQueue new_ready_to_run_tasks; |
235 TaskNamespaceQueue new_ready_to_run_namespaces; | |
217 | 236 |
218 new_pending_tasks.swap(*graph); | 237 new_pending_tasks.swap(*graph); |
219 | 238 |
220 { | 239 { |
221 base::AutoLock lock(lock_); | 240 base::AutoLock lock(lock_); |
222 | 241 |
242 DCHECK(!shutdown_); | |
243 DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); | |
244 TaskNamespace* task_namespace = namespaces_[worker_pool].get(); | |
245 | |
223 // First remove all completed tasks from |new_pending_tasks| and | 246 // First remove all completed tasks from |new_pending_tasks| and |
224 // adjust number of dependencies. | 247 // adjust number of dependencies. |
225 for (TaskVector::iterator it = completed_tasks_.begin(); | 248 for (TaskVector::iterator it = task_namespace->completed_tasks.begin(); |
226 it != completed_tasks_.end(); ++it) { | 249 it != task_namespace->completed_tasks.end(); ++it) { |
227 internal::WorkerPoolTask* task = it->get(); | 250 internal::WorkerPoolTask* task = it->get(); |
228 | 251 |
229 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( | 252 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( |
230 task); | 253 task); |
231 if (node) { | 254 if (node) { |
232 for (internal::GraphNode::Vector::const_iterator it = | 255 for (internal::GraphNode::Vector::const_iterator it = |
233 node->dependents().begin(); | 256 node->dependents().begin(); |
234 it != node->dependents().end(); ++it) { | 257 it != node->dependents().end(); ++it) { |
235 internal::GraphNode* dependent_node = *it; | 258 internal::GraphNode* dependent_node = *it; |
236 dependent_node->remove_dependency(); | 259 dependent_node->remove_dependency(); |
237 } | 260 } |
238 } | 261 } |
239 } | 262 } |
240 | 263 |
241 // Build new running task set. | 264 // Build new running task set. |
242 for (GraphNodeMap::iterator it = running_tasks_.begin(); | 265 for (TaskGraph::iterator it = task_namespace->running_tasks.begin(); |
243 it != running_tasks_.end(); ++it) { | 266 it != task_namespace->running_tasks.end(); ++it) { |
244 internal::WorkerPoolTask* task = it->first; | 267 internal::WorkerPoolTask* task = it->first; |
245 // Transfer scheduled task value from |new_pending_tasks| to | 268 // Transfer scheduled task value from |new_pending_tasks| to |
246 // |new_running_tasks| if currently running. Value must be set to | 269 // |new_running_tasks| if currently running. Value must be set to |
247 // NULL if |new_pending_tasks| doesn't contain task. This does | 270 // NULL if |new_pending_tasks| doesn't contain task. This does |
248 // the right thing in both cases. | 271 // the right thing in both cases. |
249 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); | 272 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); |
250 } | 273 } |
251 | 274 |
252 // Build new "ready to run" tasks queue. | 275 // Build new "ready to run" tasks queue. |
253 // TODO(reveman): Create this queue when building the task graph instead. | 276 // TODO(reveman): Create this queue when building the task graph instead. |
254 for (GraphNodeMap::iterator it = new_pending_tasks.begin(); | 277 for (TaskGraph::iterator it = new_pending_tasks.begin(); |
255 it != new_pending_tasks.end(); ++it) { | 278 it != new_pending_tasks.end(); ++it) { |
256 internal::WorkerPoolTask* task = it->first; | 279 internal::WorkerPoolTask* task = it->first; |
257 DCHECK(task); | 280 DCHECK(task); |
258 internal::GraphNode* node = it->second; | 281 internal::GraphNode* node = it->second; |
259 | 282 |
260 // Completed tasks should not exist in |new_pending_tasks|. | 283 // Completed tasks should not exist in |new_pending_tasks|. |
261 DCHECK(!task->HasFinishedRunning()); | 284 DCHECK(!task->HasFinishedRunning()); |
262 | 285 |
263 // Call DidSchedule() to indicate that this task has been scheduled. | 286 // Call DidSchedule() to indicate that this task has been scheduled. |
264 // Note: This is only for debugging purposes. | 287 // Note: This is only for debugging purposes. |
265 task->DidSchedule(); | 288 task->DidSchedule(); |
266 | 289 |
267 if (!node->num_dependencies()) | 290 if (!node->num_dependencies()) |
268 new_ready_to_run_tasks.push(node); | 291 new_ready_to_run_tasks.push(node); |
269 | 292 |
270 // Erase the task from old pending tasks. | 293 // Erase the task from old pending tasks. |
271 pending_tasks_.erase(task); | 294 task_namespace->pending_tasks.erase(task); |
272 } | 295 } |
273 | 296 |
274 completed_tasks_.reserve(completed_tasks_.size() + pending_tasks_.size()); | 297 task_namespace->completed_tasks.reserve( |
298 task_namespace->completed_tasks.size() + | |
299 task_namespace->pending_tasks.size()); | |
275 | 300 |
276 // The items left in |pending_tasks_| need to be canceled. | 301 // The items left in |pending_tasks| need to be canceled. |
277 for (GraphNodeMap::const_iterator it = pending_tasks_.begin(); | 302 for (TaskGraph::const_iterator it = task_namespace->pending_tasks.begin(); |
278 it != pending_tasks_.end(); | 303 it != task_namespace->pending_tasks.end(); ++it) { |
279 ++it) { | 304 task_namespace->completed_tasks.push_back(it->first); |
280 completed_tasks_.push_back(it->first); | |
281 } | 305 } |
282 | 306 |
283 // Swap task sets. | 307 // Swap task sets. |
284 // Note: old tasks are intentionally destroyed after releasing |lock_|. | 308 // Note: old tasks are intentionally destroyed after releasing |lock_|. |
285 pending_tasks_.swap(new_pending_tasks); | 309 task_namespace->pending_tasks.swap(new_pending_tasks); |
286 running_tasks_.swap(new_running_tasks); | 310 task_namespace->running_tasks.swap(new_running_tasks); |
287 std::swap(ready_to_run_tasks_, new_ready_to_run_tasks); | 311 std::swap(task_namespace->ready_to_run_tasks, new_ready_to_run_tasks); |
288 | 312 |
289 // If |ready_to_run_tasks_| is empty, it means we either have | 313 // If |ready_to_run_tasks| is empty, it means we either have |
290 // running tasks, or we have no pending tasks. | 314 // running tasks, or we have no pending tasks. |
291 DCHECK(!ready_to_run_tasks_.empty() || | 315 DCHECK(!task_namespace->ready_to_run_tasks.empty() || |
292 (pending_tasks_.empty() || !running_tasks_.empty())); | 316 (task_namespace->pending_tasks.empty() || |
317 !task_namespace->running_tasks.empty())); | |
318 | |
319 // Build new "ready to run" task namespaces queue. | |
320 for (TaskNamespaceMap::iterator it = namespaces_.begin(); | |
321 it != namespaces_.end(); ++it) { | |
322 if (!it->second->ready_to_run_tasks.empty()) | |
323 new_ready_to_run_namespaces.push(it->second.get()); | |
324 } | |
325 std::swap(ready_to_run_namespaces_, new_ready_to_run_namespaces); | |
293 | 326 |
294 // If there is more work available, wake up worker thread. | 327 // If there is more work available, wake up worker thread. |
295 if (!ready_to_run_tasks_.empty()) | 328 if (!ready_to_run_namespaces_.empty()) |
296 has_ready_to_run_tasks_cv_.Signal(); | 329 has_ready_to_run_tasks_cv_.Signal(); |
297 } | 330 } |
298 } | 331 } |
299 | 332 |
300 void WorkerPool::Inner::CollectCompletedTasks(TaskVector* completed_tasks) { | 333 void TaskGraphRunner::CollectCompletedTasks( |
334 const WorkerPool* worker_pool, TaskVector* completed_tasks) { | |
301 base::AutoLock lock(lock_); | 335 base::AutoLock lock(lock_); |
302 | 336 |
303 DCHECK_EQ(0u, completed_tasks->size()); | 337 DCHECK_EQ(0u, completed_tasks->size()); |
304 completed_tasks->swap(completed_tasks_); | 338 DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); |
339 completed_tasks->swap(namespaces_[worker_pool]->completed_tasks); | |
305 } | 340 } |
306 | 341 |
307 void WorkerPool::Inner::Run() { | 342 void TaskGraphRunner::Run() { |
308 base::AutoLock lock(lock_); | 343 base::AutoLock lock(lock_); |
309 | 344 |
310 // Get a unique thread index. | 345 // Get a unique thread index. |
311 int thread_index = next_thread_index_++; | 346 int thread_index = next_thread_index_++; |
312 | 347 |
313 while (true) { | 348 while (true) { |
314 if (ready_to_run_tasks_.empty()) { | 349 if (ready_to_run_namespaces_.empty()) { |
315 // Exit when shutdown is set and no more tasks are pending. | 350 // Exit when shutdown is set and no more tasks are pending. |
316 if (shutdown_ && pending_tasks_.empty()) | 351 if (shutdown_) |
317 break; | 352 break; |
318 | 353 |
319 // Wait for more tasks. | 354 // Wait for more tasks. |
320 has_ready_to_run_tasks_cv_.Wait(); | 355 has_ready_to_run_tasks_cv_.Wait(); |
321 continue; | 356 continue; |
322 } | 357 } |
323 | 358 |
324 // Take top priority task from |ready_to_run_tasks_|. | 359 // Take top priority TaskNamespace from |ready_to_run_namespaces_|. |
360 TaskNamespace* task_namespace = ready_to_run_namespaces_.top(); | |
361 ready_to_run_namespaces_.pop(); | |
362 DCHECK(!task_namespace->ready_to_run_tasks.empty()); | |
363 | |
364 // Take top priority task from |ready_to_run_tasks|. | |
325 scoped_refptr<internal::WorkerPoolTask> task( | 365 scoped_refptr<internal::WorkerPoolTask> task( |
326 ready_to_run_tasks_.top()->task()); | 366 task_namespace->ready_to_run_tasks.top()->task()); |
327 ready_to_run_tasks_.pop(); | 367 task_namespace->ready_to_run_tasks.pop(); |
328 | 368 |
329 // Move task from |pending_tasks_| to |running_tasks_|. | 369 // Add task namespace back to |ready_to_run_namespaces_| if not |
330 DCHECK(pending_tasks_.contains(task.get())); | 370 // empty after taking top priority task. |
331 DCHECK(!running_tasks_.contains(task.get())); | 371 if (!task_namespace->ready_to_run_tasks.empty()) |
332 running_tasks_.set(task.get(), pending_tasks_.take_and_erase(task.get())); | 372 ready_to_run_namespaces_.push(task_namespace); |
373 | |
374 // Move task from |pending_tasks| to |running_tasks|. | |
375 DCHECK(task_namespace->pending_tasks.contains(task.get())); | |
376 DCHECK(!task_namespace->running_tasks.contains(task.get())); | |
377 task_namespace->running_tasks.set( | |
378 task.get(), | |
379 task_namespace->pending_tasks.take_and_erase(task.get())); | |
333 | 380 |
334 // There may be more work available, so wake up another worker thread. | 381 // There may be more work available, so wake up another worker thread. |
335 has_ready_to_run_tasks_cv_.Signal(); | 382 has_ready_to_run_tasks_cv_.Signal(); |
336 | 383 |
337 // Call WillRun() before releasing |lock_| and running task. | 384 // Call WillRun() before releasing |lock_| and running task. |
338 task->WillRun(); | 385 task->WillRun(); |
339 | 386 |
340 { | 387 { |
341 base::AutoUnlock unlock(lock_); | 388 base::AutoUnlock unlock(lock_); |
342 | 389 |
343 task->RunOnWorkerThread(thread_index); | 390 task->RunOnWorkerThread(thread_index); |
344 } | 391 } |
345 | 392 |
346 // This will mark task as finished running. | 393 // This will mark task as finished running. |
347 task->DidRun(); | 394 task->DidRun(); |
348 | 395 |
349 // Now iterate over all dependents to remove dependency and check | 396 // Now iterate over all dependents to remove dependency and check |
350 // if they are ready to run. | 397 // if they are ready to run. |
351 scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase( | 398 scoped_ptr<internal::GraphNode> node = |
352 task.get()); | 399 task_namespace->running_tasks.take_and_erase(task.get()); |
353 if (node) { | 400 if (node) { |
354 for (internal::GraphNode::Vector::const_iterator it = | 401 for (internal::GraphNode::Vector::const_iterator it = |
355 node->dependents().begin(); | 402 node->dependents().begin(); |
356 it != node->dependents().end(); ++it) { | 403 it != node->dependents().end(); ++it) { |
357 internal::GraphNode* dependent_node = *it; | 404 internal::GraphNode* dependent_node = *it; |
358 | 405 |
359 dependent_node->remove_dependency(); | 406 dependent_node->remove_dependency(); |
360 // Task is ready if it has no dependencies. Add it to | 407 // Task is ready if it has no dependencies. Add it to |
361 // |ready_to_run_tasks_|. | 408 // |ready_to_run_tasks|. |
362 if (!dependent_node->num_dependencies()) | 409 if (!dependent_node->num_dependencies()) { |
363 ready_to_run_tasks_.push(dependent_node); | 410 bool was_empty = task_namespace->ready_to_run_tasks.empty(); |
411 task_namespace->ready_to_run_tasks.push(dependent_node); | |
412 // Task namespace is ready if it has at least one ready | |
413 // to run task. Add it to |ready_to_run_namespaces_| if | |
414 // it just become ready. | |
415 if (was_empty) | |
416 ready_to_run_namespaces_.push(task_namespace); | |
vmpstr
2013/12/30 21:13:01
I think this might not be correct. By adding a new
reveman
2013/12/31 01:16:52
Good call. I guess we'll have to iterate over all
reveman
2013/12/31 03:37:29
An alternative would be to just make ready_to_run_
sohanjg
2013/12/31 06:31:25
I am not too sure about this.
When we push a new r
reveman
2014/01/02 03:50:08
The priority queue will not detect changes to elem
vmpstr
2014/01/02 05:29:16
I think make_heap is a good solution to this. I do
| |
417 } | |
364 } | 418 } |
365 } | 419 } |
366 | 420 |
367 // Finally add task to |completed_tasks_|. | 421 // Finally add task to |completed_tasks|. |
368 completed_tasks_.push_back(task); | 422 task_namespace->completed_tasks.push_back(task); |
423 | |
424 // If namespace has finished running all tasks, wake up origin thread. | |
425 if (has_finished_running_tasks(task_namespace)) | |
426 has_namespaces_with_finished_running_tasks_cv_.Signal(); | |
369 } | 427 } |
370 | 428 |
371 // We noticed we should exit. Wake up the next worker so it knows it should | 429 // We noticed we should exit. Wake up the next worker so it knows it should |
372 // exit as well (because the Shutdown() code only signals once). | 430 // exit as well (because the Shutdown() code only signals once). |
373 has_ready_to_run_tasks_cv_.Signal(); | 431 has_ready_to_run_tasks_cv_.Signal(); |
374 } | 432 } |
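The thread above about |ready_to_run_namespaces_| is cut off, but the underlying issue is a general one: std::priority_queue never re-examines elements it already holds, so if a namespace that is already queued gains a higher-priority ready task, its position in the queue goes stale. A self-contained illustration of that pitfall and of the std::make_heap workaround reveman floats (Ns and NsLess are stand-ins, not patch types, and this is a sketch rather than the code that lands):

  #include <algorithm>
  #include <cassert>
  #include <vector>

  struct Ns { unsigned best_task_priority; };  // Lower value is more urgent.

  struct NsLess {
    bool operator()(const Ns* a, const Ns* b) const {
      return a->best_task_priority > b->best_task_priority;  // Inverted, as in the patch.
    }
  };

  int main() {
    Ns a = {5u};
    Ns b = {3u};
    std::vector<Ns*> ready;
    ready.push_back(&a);
    ready.push_back(&b);
    std::make_heap(ready.begin(), ready.end(), NsLess());
    assert(ready.front() == &b);  // |b| is the most urgent namespace.

    // A newly ready task makes |a| more urgent. A std::priority_queue
    // holding both pointers would keep returning |b| from top(); rebuilding
    // the heap after the mutation restores the intended order.
    a.best_task_priority = 1u;
    std::make_heap(ready.begin(), ready.end(), NsLess());
    assert(ready.front() == &a);
    return 0;
  }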
375 | 433 |
376 WorkerPool::WorkerPool(size_t num_threads, | 434 class CC_EXPORT CompositorRasterTaskGraphRunner |
377 const std::string& thread_name_prefix) | 435 : public TaskGraphRunner { |
378 : in_dispatch_completion_callbacks_(false), | 436 public: |
379 inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) { | 437 CompositorRasterTaskGraphRunner() : TaskGraphRunner( |
438 switches::GetNumRasterThreads(), "CompositorRaster") { | |
reveman
2014/01/02 03:50:08
FYI, we're probably going to have to remove the us
| |
439 } | |
440 }; | |
441 | |
442 base::LazyInstance<CompositorRasterTaskGraphRunner> | |
443 g_task_graph_runner = LAZY_INSTANCE_INITIALIZER; | |
444 | |
445 } // namespace | |
446 | |
447 namespace internal { | |
448 | |
449 WorkerPoolTask::WorkerPoolTask() | |
450 : did_schedule_(false), | |
451 did_run_(false), | |
452 did_complete_(false) { | |
453 } | |
454 | |
455 WorkerPoolTask::~WorkerPoolTask() { | |
456 DCHECK_EQ(did_schedule_, did_complete_); | |
457 DCHECK(!did_run_ || did_schedule_); | |
458 DCHECK(!did_run_ || did_complete_); | |
459 } | |
460 | |
461 void WorkerPoolTask::DidSchedule() { | |
462 DCHECK(!did_complete_); | |
463 did_schedule_ = true; | |
464 } | |
465 | |
466 void WorkerPoolTask::WillRun() { | |
467 DCHECK(did_schedule_); | |
468 DCHECK(!did_complete_); | |
469 DCHECK(!did_run_); | |
470 } | |
471 | |
472 void WorkerPoolTask::DidRun() { | |
473 did_run_ = true; | |
474 } | |
475 | |
476 void WorkerPoolTask::WillComplete() { | |
477 DCHECK(!did_complete_); | |
478 } | |
479 | |
480 void WorkerPoolTask::DidComplete() { | |
481 DCHECK(did_schedule_); | |
482 DCHECK(!did_complete_); | |
483 did_complete_ = true; | |
484 } | |
485 | |
486 bool WorkerPoolTask::HasFinishedRunning() const { | |
487 return did_run_; | |
488 } | |
489 | |
490 bool WorkerPoolTask::HasCompleted() const { | |
491 return did_complete_; | |
492 } | |
493 | |
494 GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) | |
495 : task_(task), | |
496 priority_(priority), | |
497 num_dependencies_(0) { | |
498 } | |
499 | |
500 GraphNode::~GraphNode() { | |
501 } | |
502 | |
503 } // namespace internal | |
504 | |
505 WorkerPool::WorkerPool() : in_dispatch_completion_callbacks_(false) { | |
506 g_task_graph_runner.Pointer()->Register(this); | |
380 } | 507 } |
381 | 508 |
382 WorkerPool::~WorkerPool() { | 509 WorkerPool::~WorkerPool() { |
510 g_task_graph_runner.Pointer()->Unregister(this); | |
383 } | 511 } |
384 | 512 |
385 void WorkerPool::Shutdown() { | 513 void WorkerPool::Shutdown() { |
386 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); | 514 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); |
387 | 515 |
388 DCHECK(!in_dispatch_completion_callbacks_); | 516 DCHECK(!in_dispatch_completion_callbacks_); |
389 | 517 |
390 inner_->Shutdown(); | 518 g_task_graph_runner.Pointer()->WaitForTasksToFinishRunning(this); |
391 } | 519 } |
392 | 520 |
393 void WorkerPool::CheckForCompletedTasks() { | 521 void WorkerPool::CheckForCompletedTasks() { |
394 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks"); | 522 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks"); |
395 | 523 |
396 DCHECK(!in_dispatch_completion_callbacks_); | 524 DCHECK(!in_dispatch_completion_callbacks_); |
397 | 525 |
398 TaskVector completed_tasks; | 526 TaskVector completed_tasks; |
399 inner_->CollectCompletedTasks(&completed_tasks); | 527 g_task_graph_runner.Pointer()->CollectCompletedTasks(this, &completed_tasks); |
400 ProcessCompletedTasks(completed_tasks); | 528 ProcessCompletedTasks(completed_tasks); |
401 } | 529 } |
402 | 530 |
403 void WorkerPool::ProcessCompletedTasks( | 531 void WorkerPool::ProcessCompletedTasks( |
404 const TaskVector& completed_tasks) { | 532 const TaskVector& completed_tasks) { |
405 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", | 533 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", |
406 "completed_task_count", completed_tasks.size()); | 534 "completed_task_count", completed_tasks.size()); |
407 | 535 |
408 // Worker pool instance is not reentrant while processing completed tasks. | 536 // Worker pool instance is not reentrant while processing completed tasks. |
409 in_dispatch_completion_callbacks_ = true; | 537 in_dispatch_completion_callbacks_ = true; |
(...skipping 10 matching lines...) | |
420 | 548 |
421 in_dispatch_completion_callbacks_ = false; | 549 in_dispatch_completion_callbacks_ = false; |
422 } | 550 } |
423 | 551 |
424 void WorkerPool::SetTaskGraph(TaskGraph* graph) { | 552 void WorkerPool::SetTaskGraph(TaskGraph* graph) { |
425 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", | 553 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", |
426 "num_tasks", graph->size()); | 554 "num_tasks", graph->size()); |
427 | 555 |
428 DCHECK(!in_dispatch_completion_callbacks_); | 556 DCHECK(!in_dispatch_completion_callbacks_); |
429 | 557 |
430 inner_->SetTaskGraph(graph); | 558 g_task_graph_runner.Pointer()->SetTaskGraph(this, graph); |
431 } | 559 } |
432 | 560 |
433 } // namespace cc | 561 } // namespace cc |