OLD | NEW |
---|---|
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/resources/worker_pool.h" | 5 #include "cc/resources/worker_pool.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <queue> | 8 #include <queue> |
9 | 9 |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
11 #include "base/command_line.h" | |
11 #include "base/containers/hash_tables.h" | 12 #include "base/containers/hash_tables.h" |
12 #include "base/debug/trace_event.h" | 13 #include "base/debug/trace_event.h" |
14 #include "base/lazy_instance.h" | |
15 #include "base/memory/linked_ptr.h" | |
13 #include "base/strings/stringprintf.h" | 16 #include "base/strings/stringprintf.h" |
14 #include "base/synchronization/condition_variable.h" | 17 #include "base/synchronization/condition_variable.h" |
15 #include "base/threading/simple_thread.h" | 18 #include "base/threading/simple_thread.h" |
16 #include "base/threading/thread_restrictions.h" | 19 #include "base/threading/thread_restrictions.h" |
17 #include "cc/base/scoped_ptr_deque.h" | 20 #include "cc/base/scoped_ptr_deque.h" |
21 #include "cc/base/switches.h" | |
18 | 22 |
19 namespace cc { | 23 namespace cc { |
20 | 24 |
21 namespace internal { | 25 namespace { |
22 | 26 |
23 WorkerPoolTask::WorkerPoolTask() | 27 // TaskGraphRunner will be able to run task graphs |
24 : did_schedule_(false), | 28 // from multiple WorkerPool instances. All members are guarded |
25 did_run_(false), | 29 // by |lock_|. |
26 did_complete_(false) { | 30 class TaskGraphRunner : public base::DelegateSimpleThread::Delegate { |
27 } | 31 public: |
32 TaskGraphRunner(size_t num_threads, const std::string& thread_name_prefix); | |
33 virtual ~TaskGraphRunner(); | |
34 void Register(const WorkerPool* worker_pool); | |
35 void Unregister(const WorkerPool* worker_pool); | |
36 void Shutdown(); | |
28 | 37 |
29 WorkerPoolTask::~WorkerPoolTask() { | 38 typedef WorkerPool::TaskGraph TaskGraph; |
30 DCHECK_EQ(did_schedule_, did_complete_); | |
31 DCHECK(!did_run_ || did_schedule_); | |
32 DCHECK(!did_run_ || did_complete_); | |
33 } | |
34 | 39 |
35 void WorkerPoolTask::DidSchedule() { | 40 typedef WorkerPool::TaskVector TaskVector; |
36 DCHECK(!did_complete_); | |
37 did_schedule_ = true; | |
38 } | |
39 | |
40 void WorkerPoolTask::WillRun() { | |
41 DCHECK(did_schedule_); | |
42 DCHECK(!did_complete_); | |
43 DCHECK(!did_run_); | |
44 } | |
45 | |
46 void WorkerPoolTask::DidRun() { | |
47 did_run_ = true; | |
48 } | |
49 | |
50 void WorkerPoolTask::WillComplete() { | |
51 DCHECK(!did_complete_); | |
52 } | |
53 | |
54 void WorkerPoolTask::DidComplete() { | |
55 DCHECK(did_schedule_); | |
56 DCHECK(!did_complete_); | |
57 did_complete_ = true; | |
58 } | |
59 | |
60 bool WorkerPoolTask::HasFinishedRunning() const { | |
61 return did_run_; | |
62 } | |
63 | |
64 bool WorkerPoolTask::HasCompleted() const { | |
65 return did_complete_; | |
66 } | |
67 | |
68 GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) | |
69 : task_(task), | |
70 priority_(priority), | |
71 num_dependencies_(0) { | |
72 } | |
73 | |
74 GraphNode::~GraphNode() { | |
75 } | |
76 | |
77 } // namespace internal | |
78 | |
79 // Internal to the worker pool. Any data or logic that needs to be | |
80 // shared between threads lives in this class. All members are guarded | |
81 // by |lock_|. | |
82 class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { | |
83 public: | |
84 Inner(size_t num_threads, const std::string& thread_name_prefix); | |
85 virtual ~Inner(); | |
86 | |
87 void Shutdown(); | |
88 | 41 |
89 // Schedule running of tasks in |graph|. Tasks previously scheduled but | 42 // Schedule running of tasks in |graph|. Tasks previously scheduled but |
90 // no longer needed will be canceled unless already running. Canceled | 43 // no longer needed will be canceled unless already running. Canceled |
91 // tasks are moved to |completed_tasks_| without being run. The result | 44 // tasks are moved to |completed_tasks| without being run. The result |
92 // is that once scheduled, a task is guaranteed to end up in the | 45 // is that once scheduled, a task is guaranteed to end up in the |
93 // |completed_tasks_| queue even if they later get canceled by another | 46 // |completed_tasks| queue even if they later get canceled by another |
94 // call to SetTaskGraph(). | 47 // call to SetTaskGraph(). |
95 void SetTaskGraph(TaskGraph* graph); | 48 void SetTaskGraph(const WorkerPool* worker_pool, TaskGraph* graph); |
96 | 49 |
97 // Collect all completed tasks in |completed_tasks|. | 50 // Collect all completed tasks in |completed_tasks|. |
98 void CollectCompletedTasks(TaskVector* completed_tasks); | 51 void CollectCompletedTasks(const WorkerPool* worker_pool, |
52 TaskVector* completed_tasks); | |
99 | 53 |
100 private: | 54 private: |
101 class PriorityComparator { | 55 class TaskPriorityComparator { |
102 public: | 56 public: |
103 bool operator()(const internal::GraphNode* a, | 57 bool operator()(internal::GraphNode* a, |
104 const internal::GraphNode* b) { | 58 internal::GraphNode* b) { |
105 // In this system, numerically lower priority is run first. | 59 // In this system, numerically lower priority is run first. |
106 if (a->priority() != b->priority()) | 60 if (a->priority() != b->priority()) |
107 return a->priority() > b->priority(); | 61 return a->priority() > b->priority(); |
108 | 62 |
109 // Run task with most dependents first when priority is the same. | 63 // Run task with most dependents first when priority is the same. |
110 return a->dependents().size() < b->dependents().size(); | 64 return a->dependents().size() < b->dependents().size(); |
111 } | 65 } |
112 }; | 66 }; |
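
A note on the comparator above: it reads inverted at first glance because std::priority_queue surfaces the element that compares greatest, so returning a->priority() > b->priority() puts the numerically lowest priority value on top, and the dependents comparison breaks ties in favor of the node with the most dependents. Below is a minimal, self-contained sketch of the same ordering rule; Node is a simplified stand-in for internal::GraphNode, not the real type.

  #include <cstdio>
  #include <queue>
  #include <vector>

  struct Node {
    unsigned priority;      // Lower value means run sooner.
    size_t num_dependents;  // More dependents means run sooner on a tie.
  };

  struct TaskPriorityComparator {
    bool operator()(const Node* a, const Node* b) const {
      if (a->priority != b->priority)
        return a->priority > b->priority;  // Lowest priority value on top.
      return a->num_dependents < b->num_dependents;  // Most dependents on top.
    }
  };

  int main() {
    Node a = {2, 0}, b = {1, 3}, c = {1, 7};
    std::priority_queue<Node*, std::vector<Node*>, TaskPriorityComparator> queue;
    queue.push(&a);
    queue.push(&b);
    queue.push(&c);
    // Pops c (priority 1, 7 dependents), then b (priority 1, 3 dependents),
    // then a (priority 2, 0 dependents).
    while (!queue.empty()) {
      std::printf("priority=%u dependents=%u\n",
                  queue.top()->priority,
                  static_cast<unsigned>(queue.top()->num_dependents));
      queue.pop();
    }
    return 0;
  }
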
113 | 67 |
68 // Ordered set of tasks that are ready to run. | |
69 typedef std::priority_queue<internal::GraphNode*, | |
70 std::vector<internal::GraphNode*>, | |
71 TaskPriorityComparator> TaskQueue; | |
72 | |
73 struct TaskNamespace { | |
74 TaskGraph pending_tasks; | |
75 TaskGraph running_tasks; | |
76 TaskVector completed_tasks; | |
77 TaskQueue ready_to_run_tasks; | |
78 }; | |
79 | |
80 class TaskNamespacePriorityComparator { | |
81 public: | |
82 bool operator()(const linked_ptr<TaskNamespace> a, | |
83 const linked_ptr<TaskNamespace> b) { | |
reveman
2013/12/19 02:09:38
I'd like to see you reuse the TaskPriorityComparator here.
sohanjg
2013/12/19 15:00:59
Done.
I had to lose the constness of the TaskPriorityComparator arguments.
| |
84 if (a->ready_to_run_tasks.top()->priority() | |
85 != b->ready_to_run_tasks.top()->priority()) | |
86 return a->ready_to_run_tasks.top()->priority() > | |
87 b->ready_to_run_tasks.top()->priority(); | |
88 | |
89 return a->ready_to_run_tasks.top()->dependents().size() > | |
90 b->ready_to_run_tasks.top()->dependents().size(); | |
91 } | |
92 }; | |
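
Per the review exchange above, the namespace comparison is just the task comparison applied to each namespace's top ready-to-run node; note that the hand-written version here inverts the dependents tie-break relative to TaskPriorityComparator, which is exactly the drift that delegating avoids. A self-contained sketch of that delegation, assuming the raw-pointer TaskNamespace* form also requested in this review; the simplified types stand in for the patch's real ones.

  #include <queue>
  #include <vector>

  struct Node {
    unsigned priority;
    size_t num_dependents;
  };

  struct TaskPriorityComparator {
    bool operator()(Node* a, Node* b) {
      if (a->priority != b->priority)
        return a->priority > b->priority;
      return a->num_dependents < b->num_dependents;
    }
  };

  typedef std::priority_queue<Node*, std::vector<Node*>, TaskPriorityComparator>
      TaskQueue;

  struct TaskNamespace {
    TaskQueue ready_to_run_tasks;
  };

  // Orders namespaces by their highest-priority ready task by delegating to
  // the task comparator, so the two levels of the scheduler cannot diverge.
  struct TaskNamespacePriorityComparator {
    bool operator()(TaskNamespace* a, TaskNamespace* b) {
      return task_comparator_(a->ready_to_run_tasks.top(),
                              b->ready_to_run_tasks.top());
    }
    TaskPriorityComparator task_comparator_;
  };

  int main() {
    Node n1 = {1, 2}, n2 = {3, 0};
    TaskNamespace ns1, ns2;
    ns1.ready_to_run_tasks.push(&n1);
    ns2.ready_to_run_tasks.push(&n2);
    TaskNamespacePriorityComparator compare;
    // ns1 orders ahead of ns2 because its top task has priority 1 < 3.
    return compare(&ns2, &ns1) ? 0 : 1;
  }
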
93 | |
94 typedef std::map<const WorkerPool*, | |
95 linked_ptr<TaskNamespace> > TaskNamespaceMap; | |
96 | |
97 typedef std::priority_queue<linked_ptr<TaskNamespace>, | |
98 std::vector<linked_ptr<TaskNamespace> >, | |
reveman
2013/12/19 02:09:38
No need for linked_ptr here. This can hold raw pointers.
sohanjg
2013/12/19 15:00:59
Done.
| |
99 TaskNamespacePriorityComparator> NamespaceQueue; | |
100 | |
114 // Overridden from base::DelegateSimpleThread: | 101 // Overridden from base::DelegateSimpleThread: |
115 virtual void Run() OVERRIDE; | 102 virtual void Run() OVERRIDE; |
116 | 103 |
117 // This lock protects all members of this class except | 104 // This lock protects all members of this class except |
118 // |worker_pool_on_origin_thread_|. Do not read or modify anything | 105 // |worker_pool_on_origin_thread_|. Do not read or modify anything |
119 // without holding this lock. Do not block while holding this lock. | 106 // without holding this lock. Do not block while holding this lock. |
120 mutable base::Lock lock_; | 107 mutable base::Lock lock_; |
121 | 108 |
122 // Condition variable that is waited on by worker threads until new | 109 // Condition variable that is waited on by worker threads until new |
123 // tasks are ready to run or shutdown starts. | 110 // tasks are ready to run or shutdown starts. |
124 base::ConditionVariable has_ready_to_run_tasks_cv_; | 111 base::ConditionVariable has_ready_to_run_tasks_cv_; |
125 | 112 |
126 // Provides each running thread loop with a unique index. First thread | 113 // Provides each running thread loop with a unique index. First thread |
127 // loop index is 0. | 114 // loop index is 0. |
128 unsigned next_thread_index_; | 115 unsigned next_thread_index_; |
129 | 116 |
130 // Set during shutdown. Tells workers to exit when no more tasks | 117 // Set during shutdown. Tells workers to exit when no more tasks |
131 // are pending. | 118 // are pending. |
132 bool shutdown_; | 119 bool shutdown_; |
133 | 120 |
134 // This set contains all pending tasks. | |
135 GraphNodeMap pending_tasks_; | |
136 | |
137 // Ordered set of tasks that are ready to run. | |
138 typedef std::priority_queue<internal::GraphNode*, | |
139 std::vector<internal::GraphNode*>, | |
140 PriorityComparator> TaskQueue; | |
141 TaskQueue ready_to_run_tasks_; | |
142 | |
143 // This set contains all currently running tasks. | |
144 GraphNodeMap running_tasks_; | |
145 | |
146 // Completed tasks not yet collected by origin thread. | |
147 TaskVector completed_tasks_; | |
148 | |
149 ScopedPtrDeque<base::DelegateSimpleThread> workers_; | 121 ScopedPtrDeque<base::DelegateSimpleThread> workers_; |
150 | 122 |
151 DISALLOW_COPY_AND_ASSIGN(Inner); | 123 TaskNamespaceMap namespaces_; |
124 | |
125 NamespaceQueue ready_to_run_namespaces_; | |
126 | |
127 DISALLOW_COPY_AND_ASSIGN(TaskGraphRunner); | |
152 }; | 128 }; |
153 | 129 |
154 WorkerPool::Inner::Inner( | 130 class CC_EXPORT DerivedInner : public TaskGraphRunner { |
131 public: | |
132 DerivedInner(); | |
133 }; | |
134 | |
135 base::LazyInstance<DerivedInner> g_workerpool_inner; | |
136 | |
137 | |
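
base::LazyInstance gives every WorkerPool in the process the same lazily constructed TaskGraphRunner; construction happens on first access, and Register()/Unregister() carve out a per-pool namespace inside it. A small sketch of the access pattern, assuming the usual Chromium declaration spelling with LAZY_INSTANCE_INITIALIZER; g_shared_runner and Example() are illustrative names, not part of the patch, whose global is g_workerpool_inner.

  #include "base/lazy_instance.h"

  // Stand-in declaration for illustration only.
  base::LazyInstance<DerivedInner> g_shared_runner = LAZY_INSTANCE_INITIALIZER;

  void Example(const WorkerPool* pool) {
    // First access constructs the runner (and its worker threads) lazily.
    g_shared_runner.Pointer()->Register(pool);    // Done in the WorkerPool ctor.
    g_shared_runner.Pointer()->Unregister(pool);  // Done in the WorkerPool dtor.
  }
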
138 TaskGraphRunner::TaskGraphRunner( | |
155 size_t num_threads, const std::string& thread_name_prefix) | 139 size_t num_threads, const std::string& thread_name_prefix) |
156 : lock_(), | 140 : lock_(), |
157 has_ready_to_run_tasks_cv_(&lock_), | 141 has_ready_to_run_tasks_cv_(&lock_), |
158 next_thread_index_(0), | 142 next_thread_index_(0), |
159 shutdown_(false) { | 143 shutdown_(false) { |
160 base::AutoLock lock(lock_); | 144 base::AutoLock lock(lock_); |
161 | 145 |
162 while (workers_.size() < num_threads) { | 146 while (workers_.size() < num_threads) { |
163 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( | 147 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( |
164 new base::DelegateSimpleThread( | 148 new base::DelegateSimpleThread( |
165 this, | 149 this, |
166 thread_name_prefix + | 150 thread_name_prefix + |
167 base::StringPrintf( | 151 base::StringPrintf( |
168 "Worker%u", | 152 "Worker%u", |
169 static_cast<unsigned>(workers_.size() + 1)).c_str())); | 153 static_cast<unsigned>(workers_.size() + 1)).c_str())); |
170 worker->Start(); | 154 worker->Start(); |
171 #if defined(OS_ANDROID) || defined(OS_LINUX) | 155 #if defined(OS_ANDROID) || defined(OS_LINUX) |
172 worker->SetThreadPriority(base::kThreadPriority_Background); | 156 worker->SetThreadPriority(base::kThreadPriority_Background); |
173 #endif | 157 #endif |
174 workers_.push_back(worker.Pass()); | 158 workers_.push_back(worker.Pass()); |
175 } | 159 } |
176 } | 160 } |
177 | 161 |
178 WorkerPool::Inner::~Inner() { | 162 TaskGraphRunner::~TaskGraphRunner() { |
179 base::AutoLock lock(lock_); | 163 base::AutoLock lock(lock_); |
180 | 164 |
181 DCHECK(shutdown_); | 165 DCHECK(shutdown_); |
182 | 166 DCHECK_EQ(0u, ready_to_run_namespaces_.size()); |
183 DCHECK_EQ(0u, pending_tasks_.size()); | |
184 DCHECK_EQ(0u, ready_to_run_tasks_.size()); | |
185 DCHECK_EQ(0u, running_tasks_.size()); | |
186 DCHECK_EQ(0u, completed_tasks_.size()); | |
187 } | 167 } |
188 | 168 |
189 void WorkerPool::Inner::Shutdown() { | 169 void TaskGraphRunner::Register(const WorkerPool* worker_pool) { |
170 base::AutoLock lock(lock_); | |
171 | |
172 DCHECK(namespaces_.find(worker_pool) == namespaces_.end()); | |
173 linked_ptr<TaskNamespace> task_set = make_linked_ptr(new TaskNamespace()); | |
174 namespaces_[worker_pool] = task_set; | |
175 } | |
176 void TaskGraphRunner::Unregister(const WorkerPool* worker_pool) { | |
177 base::AutoLock lock(lock_); | |
178 | |
179 DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); | |
180 namespaces_.erase(worker_pool); | |
181 } | |
182 | |
183 void TaskGraphRunner::Shutdown() { | |
190 { | 184 { |
191 base::AutoLock lock(lock_); | 185 base::AutoLock lock(lock_); |
192 | 186 |
193 DCHECK(!shutdown_); | 187 DCHECK(!shutdown_); |
194 shutdown_ = true; | 188 shutdown_ = true; |
195 | |
196 // Wake up a worker so it knows it should exit. This will cause all workers | 189 // Wake up a worker so it knows it should exit. This will cause all workers |
197 // to exit as each will wake up another worker before exiting. | 190 // to exit as each will wake up another worker before exiting. |
198 has_ready_to_run_tasks_cv_.Signal(); | 191 has_ready_to_run_tasks_cv_.Signal(); |
199 } | 192 } |
200 | 193 |
201 while (workers_.size()) { | 194 while (workers_.size()) { |
202 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front(); | 195 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front(); |
203 // http://crbug.com/240453 - Join() is considered IO and will block this | 196 // http://crbug.com/240453 - Join() is considered IO and will block this |
204 // thread. See also http://crbug.com/239423 for further ideas. | 197 // thread. See also http://crbug.com/239423 for further ideas. |
205 base::ThreadRestrictions::ScopedAllowIO allow_io; | 198 base::ThreadRestrictions::ScopedAllowIO allow_io; |
206 worker->Join(); | 199 worker->Join(); |
207 } | 200 } |
208 } | 201 } |
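
Shutdown() signals has_ready_to_run_tasks_cv_ exactly once; the pool still drains because each worker re-signals on its way out (the Signal() at the end of Run()). A self-contained sketch of that chained wake-up, using std::condition_variable and std::thread as stand-ins for base::ConditionVariable and base::DelegateSimpleThread.

  #include <condition_variable>
  #include <cstdio>
  #include <mutex>
  #include <thread>
  #include <vector>

  std::mutex g_lock;
  std::condition_variable g_has_ready_to_run_tasks_cv;
  bool g_shutdown = false;  // Guarded by g_lock.

  void WorkerMain(int index) {
    std::unique_lock<std::mutex> lock(g_lock);
    while (!g_shutdown)
      g_has_ready_to_run_tasks_cv.wait(lock);  // Real workers also run tasks here.
    // Wake one more worker so the single signal from shutdown ripples
    // through the whole pool, one thread at a time.
    g_has_ready_to_run_tasks_cv.notify_one();
    std::printf("worker %d exiting\n", index);
  }

  int main() {
    std::vector<std::thread> workers;
    for (int i = 0; i < 4; ++i)
      workers.emplace_back(WorkerMain, i);

    {
      std::lock_guard<std::mutex> lock(g_lock);
      g_shutdown = true;
    }
    g_has_ready_to_run_tasks_cv.notify_one();  // Signal once; workers chain the rest.

    for (size_t i = 0; i < workers.size(); ++i)
      workers[i].join();
    return 0;
  }
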
209 | 202 |
210 void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { | 203 void TaskGraphRunner::SetTaskGraph(const WorkerPool* worker_pool, |
204 TaskGraph* graph) { | |
211 // It is OK to call SetTaskGraph() after shutdown if |graph| is empty. | 205 // It is OK to call SetTaskGraph() after shutdown if |graph| is empty. |
212 DCHECK(graph->empty() || !shutdown_); | 206 DCHECK(graph->empty() || !shutdown_); |
213 | 207 |
214 GraphNodeMap new_pending_tasks; | 208 TaskGraph new_pending_tasks; |
215 GraphNodeMap new_running_tasks; | 209 TaskGraph new_running_tasks; |
216 TaskQueue new_ready_to_run_tasks; | 210 TaskQueue new_ready_to_run_tasks; |
217 | 211 |
218 new_pending_tasks.swap(*graph); | 212 new_pending_tasks.swap(*graph); |
219 | 213 |
220 { | 214 { |
221 base::AutoLock lock(lock_); | 215 base::AutoLock lock(lock_); |
222 | 216 |
217 DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); | |
218 linked_ptr<TaskNamespace> task_set = namespaces_[worker_pool]; | |
reveman
2013/12/19 02:09:38
task_namespace rather than task_set, please.
| |
219 | |
223 // First remove all completed tasks from |new_pending_tasks| and | 220 // First remove all completed tasks from |new_pending_tasks| and |
224 // adjust number of dependencies. | 221 // adjust number of dependencies. |
225 for (TaskVector::iterator it = completed_tasks_.begin(); | 222 for (TaskVector::iterator it = task_set->completed_tasks.begin(); |
226 it != completed_tasks_.end(); ++it) { | 223 it != task_set->completed_tasks.end(); ++it) { |
227 internal::WorkerPoolTask* task = it->get(); | 224 internal::WorkerPoolTask* task = it->get(); |
228 | |
229 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( | 225 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( |
230 task); | 226 task); |
231 if (node) { | 227 if (node) { |
232 for (internal::GraphNode::Vector::const_iterator it = | 228 for (internal::GraphNode::Vector::const_iterator it = |
233 node->dependents().begin(); | 229 node->dependents().begin(); |
234 it != node->dependents().end(); ++it) { | 230 it != node->dependents().end(); ++it) { |
235 internal::GraphNode* dependent_node = *it; | 231 internal::GraphNode* dependent_node = *it; |
236 dependent_node->remove_dependency(); | 232 dependent_node->remove_dependency(); |
237 } | 233 } |
238 } | 234 } |
239 } | 235 } |
240 | |
241 // Build new running task set. | 236 // Build new running task set. |
242 for (GraphNodeMap::iterator it = running_tasks_.begin(); | 237 for (TaskGraph::iterator it = |
243 it != running_tasks_.end(); ++it) { | 238 task_set->running_tasks.begin(); |
239 it != task_set->running_tasks.end(); ++it) { | |
244 internal::WorkerPoolTask* task = it->first; | 240 internal::WorkerPoolTask* task = it->first; |
245 // Transfer scheduled task value from |new_pending_tasks| to | 241 // Transfer scheduled task value from |new_pending_tasks| to |
246 // |new_running_tasks| if currently running. Value must be set to | 242 // |new_running_tasks| if currently running. Value must be set to |
247 // NULL if |new_pending_tasks| doesn't contain task. This does | 243 // NULL if |new_pending_tasks| doesn't contain task. This does |
248 // the right in both cases. | 244 // the right in both cases. |
249 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); | 245 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); |
250 } | 246 } |
251 | 247 |
252 // Build new "ready to run" tasks queue. | 248 // Build new "ready to run" tasks queue. |
253 // TODO(reveman): Create this queue when building the task graph instead. | 249 // TODO(reveman): Create this queue when building the task graph instead. |
254 for (GraphNodeMap::iterator it = new_pending_tasks.begin(); | 250 for (TaskGraph::iterator it = new_pending_tasks.begin(); |
255 it != new_pending_tasks.end(); ++it) { | 251 it != new_pending_tasks.end(); ++it) { |
256 internal::WorkerPoolTask* task = it->first; | 252 internal::WorkerPoolTask* task = it->first; |
257 DCHECK(task); | 253 DCHECK(task); |
258 internal::GraphNode* node = it->second; | 254 internal::GraphNode* node = it->second; |
259 | 255 |
260 // Completed tasks should not exist in |new_pending_tasks|. | 256 // Completed tasks should not exist in |new_pending_tasks|. |
261 DCHECK(!task->HasFinishedRunning()); | 257 DCHECK(!task->HasFinishedRunning()); |
262 | 258 |
263 // Call DidSchedule() to indicate that this task has been scheduled. | 259 // Call DidSchedule() to indicate that this task has been scheduled. |
264 // Note: This is only for debugging purposes. | 260 // Note: This is only for debugging purposes. |
265 task->DidSchedule(); | 261 task->DidSchedule(); |
266 | 262 |
267 if (!node->num_dependencies()) | 263 if (!node->num_dependencies()) |
268 new_ready_to_run_tasks.push(node); | 264 new_ready_to_run_tasks.push(node); |
269 | 265 |
270 // Erase the task from old pending tasks. | 266 // Erase the task from old pending tasks. |
271 pending_tasks_.erase(task); | 267 task_set->pending_tasks.erase(task); |
268 | |
272 } | 269 } |
273 | 270 |
274 completed_tasks_.reserve(completed_tasks_.size() + pending_tasks_.size()); | 271 task_set->completed_tasks.reserve( |
272 task_set->completed_tasks.size() + | |
273 task_set->pending_tasks.size()); | |
275 | 274 |
276 // The items left in |pending_tasks_| need to be canceled. | 275 // The items left in |pending_tasks| need to be canceled. |
277 for (GraphNodeMap::const_iterator it = pending_tasks_.begin(); | 276 for (TaskGraph::const_iterator it = |
278 it != pending_tasks_.end(); | 277 task_set->pending_tasks.begin(); |
279 ++it) { | 278 it != task_set->pending_tasks.end(); |
280 completed_tasks_.push_back(it->first); | 279 ++it) { |
280 task_set->completed_tasks.push_back(it->first); | |
281 } | 281 } |
282 | 282 |
283 // Swap task sets. | 283 // Swap task sets. |
284 // Note: old tasks are intentionally destroyed after releasing |lock_|. | 284 // Note: old tasks are intentionally destroyed after releasing |lock_|. |
285 pending_tasks_.swap(new_pending_tasks); | 285 task_set->pending_tasks.swap(new_pending_tasks); |
286 running_tasks_.swap(new_running_tasks); | 286 task_set->running_tasks.swap(new_running_tasks); |
287 std::swap(ready_to_run_tasks_, new_ready_to_run_tasks); | 287 std::swap(task_set->ready_to_run_tasks, new_ready_to_run_tasks); |
288 | 288 |
289 // If |ready_to_run_tasks_| is empty, it means we either have | 289 // Re-create the ready_to_run_namespaces_ with new TaskNamespace |
290 while (!ready_to_run_namespaces_.empty()) | |
291 ready_to_run_namespaces_.pop(); | |
292 ready_to_run_namespaces_.push(task_set); | |
293 | |
294 // If |ready_to_run_tasks| is empty, it means we either have | |
290 // running tasks, or we have no pending tasks. | 295 // running tasks, or we have no pending tasks. |
291 DCHECK(!ready_to_run_tasks_.empty() || | 296 DCHECK(!task_set->ready_to_run_tasks.empty() || |
292 (pending_tasks_.empty() || !running_tasks_.empty())); | 297 (task_set->pending_tasks.empty() || |
298 !task_set->running_tasks.empty())); | |
293 | 299 |
294 // If there is more work available, wake up worker thread. | 300 // If there is more work available, wake up worker thread. |
295 if (!ready_to_run_tasks_.empty()) | 301 if (!task_set->ready_to_run_tasks.empty()) |
296 has_ready_to_run_tasks_cv_.Signal(); | 302 has_ready_to_run_tasks_cv_.Signal(); |
297 } | 303 } |
298 } | 304 } |
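
One detail worth calling out in SetTaskGraph(): the new_* containers are swapped with the namespace's containers while |lock_| is held, but they are function locals, so the old task references are released only when the function returns, after the lock is gone ("old tasks are intentionally destroyed after releasing |lock_|"). A minimal sketch of that swap-then-destroy-outside-the-lock pattern; the shared_ptr payload is just a placeholder for ref-counted tasks.

  #include <memory>
  #include <mutex>
  #include <vector>

  std::mutex g_lock;
  std::vector<std::shared_ptr<int> > g_tasks;  // Guarded by g_lock.

  void ReplaceTasks(std::vector<std::shared_ptr<int> >* new_tasks) {
    std::vector<std::shared_ptr<int> > old_tasks;
    {
      std::lock_guard<std::mutex> lock(g_lock);
      old_tasks.swap(g_tasks);   // Pull the old set out under the lock.
      g_tasks.swap(*new_tasks);  // Install the new set.
    }
    // old_tasks is destroyed here, outside the critical section, so task
    // destructors never run while other threads are blocked on g_lock.
  }

  int main() {
    std::vector<std::shared_ptr<int> > graph;
    graph.push_back(std::make_shared<int>(42));
    ReplaceTasks(&graph);
    return 0;
  }
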
299 | 305 |
300 void WorkerPool::Inner::CollectCompletedTasks(TaskVector* completed_tasks) { | 306 void TaskGraphRunner::CollectCompletedTasks |
307 (const WorkerPool* worker_pool, TaskVector* completed_tasks) { | |
301 base::AutoLock lock(lock_); | 308 base::AutoLock lock(lock_); |
302 | 309 |
303 DCHECK_EQ(0u, completed_tasks->size()); | 310 DCHECK_EQ(0u, completed_tasks->size()); |
304 completed_tasks->swap(completed_tasks_); | 311 if (!ready_to_run_namespaces_.empty()) |
312 completed_tasks->swap(ready_to_run_namespaces_.top()->completed_tasks); | |
305 } | 313 } |
306 | 314 |
307 void WorkerPool::Inner::Run() { | 315 void TaskGraphRunner::Run() { |
308 base::AutoLock lock(lock_); | 316 base::AutoLock lock(lock_); |
309 | 317 |
310 // Get a unique thread index. | 318 // Get a unique thread index. |
311 int thread_index = next_thread_index_++; | 319 int thread_index = next_thread_index_++; |
320 linked_ptr<TaskNamespace> ready_to_run_task_set; | |
312 | 321 |
313 while (true) { | 322 while (true) { |
314 if (ready_to_run_tasks_.empty()) { | 323 if (ready_to_run_namespaces_.empty()) { |
315 // Exit when shutdown is set and no more tasks are pending. | 324 // Exit when shutdown is set and no more tasks are pending. |
316 if (shutdown_ && pending_tasks_.empty()) | 325 if (shutdown_ && ready_to_run_task_set->pending_tasks.empty()) { |
317 break; | 326 break; |
318 | 327 } |
319 // Wait for more tasks. | 328 // Wait for more tasks. |
320 has_ready_to_run_tasks_cv_.Wait(); | 329 has_ready_to_run_tasks_cv_.Wait(); |
321 continue; | 330 continue; |
322 } | 331 } |
323 | 332 |
324 // Take top priority task from |ready_to_run_tasks_|. | 333 // Take top priority TaskNamespace from |ready_to_run_namespaces_|. |
334 ready_to_run_task_set = ready_to_run_namespaces_.top(); | |
335 | |
336 // Take top priority task from |ready_to_run_tasks|. | |
325 scoped_refptr<internal::WorkerPoolTask> task( | 337 scoped_refptr<internal::WorkerPoolTask> task( |
326 ready_to_run_tasks_.top()->task()); | 338 ready_to_run_task_set->ready_to_run_tasks.top()->task()); |
327 ready_to_run_tasks_.pop(); | 339 ready_to_run_task_set->ready_to_run_tasks.pop(); |
328 | 340 |
329 // Move task from |pending_tasks_| to |running_tasks_|. | 341 |
330 DCHECK(pending_tasks_.contains(task.get())); | 342 // Move task from |pending_tasks| to |running_tasks|. |
331 DCHECK(!running_tasks_.contains(task.get())); | 343 DCHECK(ready_to_run_task_set->pending_tasks.contains(task.get())); |
332 running_tasks_.set(task.get(), pending_tasks_.take_and_erase(task.get())); | 344 DCHECK(!ready_to_run_task_set->running_tasks.contains(task.get())); |
345 | |
346 ready_to_run_task_set->running_tasks.set( | |
347 task.get(), ready_to_run_task_set->pending_tasks.take_and_erase | |
348 (task.get())); | |
333 | 349 |
334 // There may be more work available, so wake up another worker thread. | 350 // There may be more work available, so wake up another worker thread. |
335 has_ready_to_run_tasks_cv_.Signal(); | 351 has_ready_to_run_tasks_cv_.Signal(); |
336 | 352 |
353 | |
337 // Call WillRun() before releasing |lock_| and running task. | 354 // Call WillRun() before releasing |lock_| and running task. |
338 task->WillRun(); | 355 task->WillRun(); |
339 | 356 |
340 { | 357 { |
341 base::AutoUnlock unlock(lock_); | 358 base::AutoUnlock unlock(lock_); |
342 | |
343 task->RunOnWorkerThread(thread_index); | 359 task->RunOnWorkerThread(thread_index); |
344 } | 360 } |
345 | 361 |
346 // This will mark task as finished running. | 362 // This will mark task as finished running. |
347 task->DidRun(); | 363 task->DidRun(); |
348 | 364 |
349 // Now iterate over all dependents to remove dependency and check | 365 // Now iterate over all dependents to remove dependency and check |
350 // if they are ready to run. | 366 // if they are ready to run. |
351 scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase( | 367 scoped_ptr<internal::GraphNode> node = |
368 ready_to_run_task_set->running_tasks.take_and_erase( | |
352 task.get()); | 369 task.get()); |
353 if (node) { | 370 if (node) { |
354 for (internal::GraphNode::Vector::const_iterator it = | 371 for (internal::GraphNode::Vector::const_iterator it = |
355 node->dependents().begin(); | 372 node->dependents().begin(); |
356 it != node->dependents().end(); ++it) { | 373 it != node->dependents().end(); ++it) { |
357 internal::GraphNode* dependent_node = *it; | 374 internal::GraphNode* dependent_node = *it; |
358 | 375 |
359 dependent_node->remove_dependency(); | 376 dependent_node->remove_dependency(); |
360 // Task is ready if it has no dependencies. Add it to | 377 // Task is ready if it has no dependencies. Add it to |
361 // |ready_to_run_tasks_|. | 378 // |ready_to_run_tasks|. |
362 if (!dependent_node->num_dependencies()) | 379 if (!dependent_node->num_dependencies()) |
363 ready_to_run_tasks_.push(dependent_node); | 380 ready_to_run_task_set->ready_to_run_tasks.push(dependent_node); |
381 } | |
364 } | 382 } |
365 } | |
366 | 383 |
367 // Finally add task to |completed_tasks_|. | 384 // Finally add task to |completed_tasks|. |
368 completed_tasks_.push_back(task); | 385 ready_to_run_task_set->completed_tasks.push_back(task); |
386 | |
387 // Pop when ready_to_run_tasks is empty | |
388 if (ready_to_run_task_set->ready_to_run_tasks.empty()) | |
389 ready_to_run_namespaces_.pop(); | |
369 } | 390 } |
370 | 391 |
371 // We noticed we should exit. Wake up the next worker so it knows it should | 392 // We noticed we should exit. Wake up the next worker so it knows it should |
372 // exit as well (because the Shutdown() code only signals once). | 393 // exit as well (because the Shutdown() code only signals once). |
373 has_ready_to_run_tasks_cv_.Signal(); | 394 has_ready_to_run_tasks_cv_.Signal(); |
374 } | 395 } |
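
Everything in Run() that touches the queues happens under |lock_|; the lock is dropped only around RunOnWorkerThread(), via base::AutoUnlock, and another worker is signalled first so it can pick up remaining work in parallel. A condensed, self-contained sketch of that structure; std::function tasks and a plain queue stand in for the graph machinery.

  #include <condition_variable>
  #include <functional>
  #include <mutex>
  #include <queue>
  #include <thread>

  typedef std::function<void()> Task;

  std::mutex g_lock;
  std::condition_variable g_has_ready_to_run_tasks_cv;
  std::queue<Task> g_ready_to_run_tasks;  // Guarded by g_lock.
  bool g_shutdown = false;                // Guarded by g_lock.

  void WorkerLoop() {
    std::unique_lock<std::mutex> lock(g_lock);
    while (true) {
      if (g_ready_to_run_tasks.empty()) {
        if (g_shutdown)
          break;
        g_has_ready_to_run_tasks_cv.wait(lock);  // Wait for more tasks.
        continue;
      }

      // Take the next task while still holding the lock.
      Task task = g_ready_to_run_tasks.front();
      g_ready_to_run_tasks.pop();

      // There may be more work available, so wake up another worker.
      g_has_ready_to_run_tasks_cv.notify_one();

      // Drop the lock only while the task runs (base::AutoUnlock in the
      // patch), then reacquire it before touching the queues again.
      lock.unlock();
      task();
      lock.lock();
    }
    g_has_ready_to_run_tasks_cv.notify_one();  // Chain the shutdown wake-up.
  }

  int main() {
    std::thread worker(WorkerLoop);
    {
      std::lock_guard<std::mutex> lock(g_lock);
      g_ready_to_run_tasks.push([] { /* task body runs off the main thread */ });
      g_shutdown = true;
    }
    g_has_ready_to_run_tasks_cv.notify_one();
    worker.join();
    return 0;
  }
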
375 | 396 |
397 // Derived TaskGraphRunner Ctor | |
398 DerivedInner::DerivedInner(): TaskGraphRunner | |
399 (switches::GetNumRasterThreads(), "CompositorRaster") { | |
400 } | |
401 | |
402 } // namespace | |
403 | |
404 namespace internal { | |
405 | |
406 WorkerPoolTask::WorkerPoolTask() | |
407 : did_schedule_(false), | |
408 did_run_(false), | |
409 did_complete_(false) { | |
410 } | |
411 | |
412 WorkerPoolTask::~WorkerPoolTask() { | |
413 DCHECK_EQ(did_schedule_, did_complete_); | |
414 DCHECK(!did_run_ || did_schedule_); | |
415 DCHECK(!did_run_ || did_complete_); | |
416 } | |
417 | |
418 void WorkerPoolTask::DidSchedule() { | |
419 DCHECK(!did_complete_); | |
420 did_schedule_ = true; | |
421 } | |
422 | |
423 void WorkerPoolTask::WillRun() { | |
424 DCHECK(did_schedule_); | |
425 DCHECK(!did_complete_); | |
426 DCHECK(!did_run_); | |
427 } | |
428 | |
429 void WorkerPoolTask::DidRun() { | |
430 did_run_ = true; | |
431 } | |
432 | |
433 void WorkerPoolTask::WillComplete() { | |
434 DCHECK(!did_complete_); | |
435 } | |
436 | |
437 void WorkerPoolTask::DidComplete() { | |
438 DCHECK(did_schedule_); | |
439 DCHECK(!did_complete_); | |
440 did_complete_ = true; | |
441 } | |
442 | |
443 bool WorkerPoolTask::HasFinishedRunning() const { | |
444 return did_run_; | |
445 } | |
446 | |
447 bool WorkerPoolTask::HasCompleted() const { | |
448 return did_complete_; | |
449 } | |
450 | |
451 GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) | |
452 : task_(task), | |
453 priority_(priority), | |
454 num_dependencies_(0) { | |
455 } | |
456 | |
457 GraphNode::~GraphNode() { | |
458 } | |
459 | |
460 } // namespace internal | |
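
The booleans in WorkerPoolTask exist only to back the DCHECKs: a task must be scheduled before it runs, may be canceled and never run, but must always be completed exactly once, which is the guarantee the SetTaskGraph() comment makes for |completed_tasks|. A standalone restatement of that state machine with plain asserts, including the canceled-task path; this is an illustration, not the class in worker_pool.h.

  #include <cassert>

  class TaskLifecycle {
   public:
    TaskLifecycle() : did_schedule_(false), did_run_(false), did_complete_(false) {}
    ~TaskLifecycle() {
      assert(did_schedule_ == did_complete_);  // Scheduled tasks always complete.
      assert(!did_run_ || did_schedule_);      // Only scheduled tasks run.
      assert(!did_run_ || did_complete_);
    }

    void DidSchedule() { assert(!did_complete_); did_schedule_ = true; }
    void WillRun() { assert(did_schedule_ && !did_run_ && !did_complete_); }
    void DidRun() { did_run_ = true; }
    void WillComplete() { assert(!did_complete_); }
    void DidComplete() {
      assert(did_schedule_ && !did_complete_);
      did_complete_ = true;
    }

   private:
    bool did_schedule_;
    bool did_run_;
    bool did_complete_;
  };

  int main() {
    // Normal path: scheduled, run on a worker, completed on the origin thread.
    {
      TaskLifecycle task;
      task.DidSchedule();
      task.WillRun();
      task.DidRun();
      task.WillComplete();
      task.DidComplete();
    }
    // Canceled path: never runs, but still ends up in completed tasks.
    {
      TaskLifecycle task;
      task.DidSchedule();
      task.WillComplete();
      task.DidComplete();
    }
    return 0;
  }
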
461 | |
462 | |
376 WorkerPool::WorkerPool(size_t num_threads, | 463 WorkerPool::WorkerPool(size_t num_threads, |
377 const std::string& thread_name_prefix) | 464 const std::string& thread_name_prefix) |
378 : in_dispatch_completion_callbacks_(false), | 465 : in_dispatch_completion_callbacks_(false) { |
379 inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) { | 466 g_workerpool_inner.Pointer()->Register(this); |
380 } | 467 } |
381 | 468 |
382 WorkerPool::~WorkerPool() { | 469 WorkerPool::~WorkerPool() { |
470 g_workerpool_inner.Pointer()->Unregister(this); | |
383 } | 471 } |
384 | 472 |
385 void WorkerPool::Shutdown() { | 473 void WorkerPool::Shutdown() { |
386 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); | 474 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); |
387 | 475 |
388 DCHECK(!in_dispatch_completion_callbacks_); | 476 DCHECK(!in_dispatch_completion_callbacks_); |
389 | 477 g_workerpool_inner.Pointer()->Shutdown(); |
390 inner_->Shutdown(); | |
391 } | 478 } |
392 | 479 |
393 void WorkerPool::CheckForCompletedTasks() { | 480 void WorkerPool::CheckForCompletedTasks() { |
394 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks"); | 481 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks"); |
395 | 482 |
396 DCHECK(!in_dispatch_completion_callbacks_); | 483 DCHECK(!in_dispatch_completion_callbacks_); |
397 | 484 |
398 TaskVector completed_tasks; | 485 TaskVector completed_tasks; |
399 inner_->CollectCompletedTasks(&completed_tasks); | 486 g_workerpool_inner.Pointer()->CollectCompletedTasks(this, &completed_tasks); |
400 ProcessCompletedTasks(completed_tasks); | 487 ProcessCompletedTasks(completed_tasks); |
401 } | 488 } |
402 | 489 |
403 void WorkerPool::ProcessCompletedTasks( | 490 void WorkerPool::ProcessCompletedTasks( |
404 const TaskVector& completed_tasks) { | 491 const TaskVector& completed_tasks) { |
405 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", | 492 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", |
406 "completed_task_count", completed_tasks.size()); | 493 "completed_task_count", completed_tasks.size()); |
407 | 494 |
408 // Worker pool instance is not reentrant while processing completed tasks. | 495 // Worker pool instance is not reentrant while processing completed tasks. |
409 in_dispatch_completion_callbacks_ = true; | 496 in_dispatch_completion_callbacks_ = true; |
410 | 497 |
411 for (TaskVector::const_iterator it = completed_tasks.begin(); | 498 for (TaskVector::const_iterator it = completed_tasks.begin(); |
412 it != completed_tasks.end(); | 499 it != completed_tasks.end(); |
413 ++it) { | 500 ++it) { |
414 internal::WorkerPoolTask* task = it->get(); | 501 internal::WorkerPoolTask* task = it->get(); |
415 | 502 |
416 task->WillComplete(); | 503 task->WillComplete(); |
417 task->CompleteOnOriginThread(); | 504 task->CompleteOnOriginThread(); |
418 task->DidComplete(); | 505 task->DidComplete(); |
419 } | 506 } |
420 | 507 |
421 in_dispatch_completion_callbacks_ = false; | 508 in_dispatch_completion_callbacks_ = false; |
422 } | 509 } |
423 | 510 |
424 void WorkerPool::SetTaskGraph(TaskGraph* graph) { | 511 void WorkerPool::SetTaskGraph(TaskGraph* graph) { |
425 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", | 512 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", |
426 "num_tasks", graph->size()); | 513 "num_tasks", graph->size()); |
427 | 514 |
428 DCHECK(!in_dispatch_completion_callbacks_); | 515 DCHECK(!in_dispatch_completion_callbacks_); |
429 | 516 g_workerpool_inner.Pointer()->SetTaskGraph(this, graph); |
430 inner_->SetTaskGraph(graph); | |
431 } | 517 } |
432 | 518 |
433 } // namespace cc | 519 } // namespace cc |