OLD | NEW |
---|---|
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "cc/resources/worker_pool.h" | 5 #include "cc/resources/worker_pool.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 | 8 |
9 #include "base/bind.h" | 9 #include "base/bind.h" |
10 #include "base/containers/hash_tables.h" | 10 #include "base/containers/hash_tables.h" |
11 #include "base/debug/trace_event.h" | 11 #include "base/debug/trace_event.h" |
12 #include "base/lazy_instance.h" | |
13 #include "base/memory/linked_ptr.h" | |
12 #include "base/strings/stringprintf.h" | 14 #include "base/strings/stringprintf.h" |
13 #include "base/synchronization/condition_variable.h" | 15 #include "base/synchronization/condition_variable.h" |
14 #include "base/threading/simple_thread.h" | 16 #include "base/threading/simple_thread.h" |
15 #include "base/threading/thread_restrictions.h" | 17 #include "base/threading/thread_restrictions.h" |
16 #include "cc/base/scoped_ptr_deque.h" | 18 #include "cc/base/scoped_ptr_deque.h" |
17 | 19 |
18 namespace cc { | 20 namespace cc { |
19 | 21 |
20 namespace internal { | 22 namespace { |
21 | 23 |
22 WorkerPoolTask::WorkerPoolTask() | 24 // TaskGraphRunners can process task graphs from multiple |
23 : did_schedule_(false), | 25 // WorkerPool instances. All members are guarded by |lock_|. |
24 did_run_(false), | 26 class TaskGraphRunner : public base::DelegateSimpleThread::Delegate { |
25 did_complete_(false) { | 27 public: |
26 } | 28 typedef WorkerPool::TaskGraph TaskGraph; |
29 typedef WorkerPool::TaskVector TaskVector; | |
27 | 30 |
28 WorkerPoolTask::~WorkerPoolTask() { | 31 TaskGraphRunner(size_t num_threads, const std::string& thread_name_prefix); |
29 DCHECK_EQ(did_schedule_, did_complete_); | 32 virtual ~TaskGraphRunner(); |
30 DCHECK(!did_run_ || did_schedule_); | |
31 DCHECK(!did_run_ || did_complete_); | |
32 } | |
33 | 33 |
34 void WorkerPoolTask::DidSchedule() { | 34 void Register(const WorkerPool* worker_pool); |
35 DCHECK(!did_complete_); | 35 void Unregister(const WorkerPool* worker_pool); |
36 did_schedule_ = true; | |
37 } | |
38 | |
39 void WorkerPoolTask::WillRun() { | |
40 DCHECK(did_schedule_); | |
41 DCHECK(!did_complete_); | |
42 DCHECK(!did_run_); | |
43 } | |
44 | |
45 void WorkerPoolTask::DidRun() { | |
46 did_run_ = true; | |
47 } | |
48 | |
49 void WorkerPoolTask::WillComplete() { | |
50 DCHECK(!did_complete_); | |
51 } | |
52 | |
53 void WorkerPoolTask::DidComplete() { | |
54 DCHECK(did_schedule_); | |
55 DCHECK(!did_complete_); | |
56 did_complete_ = true; | |
57 } | |
58 | |
59 bool WorkerPoolTask::HasFinishedRunning() const { | |
60 return did_run_; | |
61 } | |
62 | |
63 bool WorkerPoolTask::HasCompleted() const { | |
64 return did_complete_; | |
65 } | |
66 | |
67 GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) | |
68 : task_(task), | |
69 priority_(priority), | |
70 num_dependencies_(0) { | |
71 } | |
72 | |
73 GraphNode::~GraphNode() { | |
74 } | |
75 | |
76 } // namespace internal | |
77 | |
78 // Internal to the worker pool. Any data or logic that needs to be | |
79 // shared between threads lives in this class. All members are guarded | |
80 // by |lock_|. | |
81 class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { | |
82 public: | |
83 Inner(size_t num_threads, const std::string& thread_name_prefix); | |
84 virtual ~Inner(); | |
85 | |
86 void Shutdown(); | |
87 | |
88 // Schedule running of tasks in |graph|. Tasks previously scheduled but | 36 // Schedule running of tasks in |graph|. Tasks previously scheduled but |
89 // no longer needed will be canceled unless already running. Canceled | 37 // no longer needed will be canceled unless already running. Canceled |
90 // tasks are moved to |completed_tasks_| without being run. The result | 38 // tasks are moved to |completed_tasks| without being run. The result |
91 // is that once scheduled, a task is guaranteed to end up in the | 39 // is that once scheduled, a task is guaranteed to end up in the |
92 // |completed_tasks_| queue even if they later get canceled by another | 40 // |completed_tasks| queue even if it later gets canceled by another |
93 // call to SetTaskGraph(). | 41 // call to SetTaskGraph(). |
94 void SetTaskGraph(TaskGraph* graph); | 42 void SetTaskGraph(const WorkerPool* worker_pool, TaskGraph* graph); |
43 | |
44 // Wait for all scheduled tasks to finish running. | |
45 void WaitForTasksToFinishRunning(const WorkerPool* worker_pool); | |
95 | 46 |
96 // Collect all completed tasks in |completed_tasks|. | 47 // Collect all completed tasks in |completed_tasks|. |
97 void CollectCompletedTasks(TaskVector* completed_tasks); | 48 void CollectCompletedTasks(const WorkerPool* worker_pool, |
49 TaskVector* completed_tasks); | |
98 | 50 |
99 private: | 51 private: |
100 static bool CompareTaskPriority(const internal::GraphNode* a, | 52 static bool CompareTaskPriority(const internal::GraphNode* a, |
101 const internal::GraphNode* b) { | 53 const internal::GraphNode* b) { |
102 // In this system, numerically lower priority is run first. | 54 // In this system, numerically lower priority is run first. |
103 if (a->priority() != b->priority()) | 55 if (a->priority() != b->priority()) |
104 return a->priority() > b->priority(); | 56 return a->priority() > b->priority(); |
105 | 57 |
106 // Run task with most dependents first when priority is the same. | 58 // Run task with most dependents first when priority is the same. |
107 return a->dependents().size() < b->dependents().size(); | 59 return a->dependents().size() < b->dependents().size(); |
108 } | 60 } |
109 | 61 |
62 struct TaskNamespace { | |
63 // This set contains all pending tasks. | |
64 TaskGraph pending_tasks; | |
65 // This set contains all currently running tasks. | |
66 TaskGraph running_tasks; | |
67 // Completed tasks not yet collected by origin thread. | |
68 TaskVector completed_tasks; | |
69 // Ordered set of tasks that are ready to run. | |
70 internal::GraphNode::Vector ready_to_run_tasks; | |
71 }; | |
72 | |
73 static bool CompareTaskNamespacePriority(const TaskNamespace* a, | |
74 const TaskNamespace* b) { | |
75 DCHECK(!a->ready_to_run_tasks.empty()); | |
76 DCHECK(!b->ready_to_run_tasks.empty()); | |
77 | |
78 // Compare based on task priority of the ready_to_run_tasks heap. |
79 // .front() will hold the max element of the heap, |
80 // except after pop_heap, when the max element is moved to .back(). |
81 return CompareTaskPriority(a->ready_to_run_tasks.front(), | |
82 b->ready_to_run_tasks.front()); | |
83 } | |
84 | |
85 typedef std::map<const WorkerPool*, linked_ptr<TaskNamespace> > | |
86 TaskNamespaceMap; | |
87 | |
110 // Overridden from base::DelegateSimpleThread: | 88 // Overridden from base::DelegateSimpleThread: |
111 virtual void Run() OVERRIDE; | 89 virtual void Run() OVERRIDE; |
112 | 90 |
91 inline bool has_finished_running_tasks(TaskNamespace* task_namespace) { | |
92 return (task_namespace->pending_tasks.empty() && | |
93 task_namespace->running_tasks.empty()); | |
94 } | |
95 | |
113 // This lock protects all members of this class except | 96 // This lock protects all members of this class. Do not read or |
114 // |worker_pool_on_origin_thread_|. Do not read or modify anything | 97 // modify anything without holding this lock. Do not block while |
115 // without holding this lock. Do not block while holding this lock. | 98 // holding this lock. |
116 mutable base::Lock lock_; | 99 mutable base::Lock lock_; |
117 | 100 |
118 // Condition variable that is waited on by worker threads until new | 101 // Condition variable that is waited on by worker threads until new |
119 // tasks are ready to run or shutdown starts. | 102 // tasks are ready to run or shutdown starts. |
120 base::ConditionVariable has_ready_to_run_tasks_cv_; | 103 base::ConditionVariable has_ready_to_run_tasks_cv_; |
121 | 104 |
105 // Condition variable that is waited on by origin threads until a | |
106 // namespace has finished running all associated tasks. | |
107 base::ConditionVariable has_namespaces_with_finished_running_tasks_cv_; | |
108 | |
122 // Provides each running thread loop with a unique index. First thread | 109 // Provides each running thread loop with a unique index. First thread |
123 // loop index is 0. | 110 // loop index is 0. |
124 unsigned next_thread_index_; | 111 unsigned next_thread_index_; |
125 | 112 |
126 // Set during shutdown. Tells workers to exit when no more tasks | 113 // Set during shutdown. Tells workers to exit when no more tasks |
127 // are pending. | 114 // are pending. |
128 bool shutdown_; | 115 bool shutdown_; |
129 | 116 |
130 // This set contains all pending tasks. | 117 // This set contains all registered namespaces. |
131 GraphNodeMap pending_tasks_; | 118 TaskNamespaceMap namespaces_; |
132 | 119 |
133 // Priority queue containing tasks that are ready to run. | 120 // Ordered set of task namespaces that have ready to run tasks. |
134 internal::GraphNode::Vector ready_to_run_tasks_; | 121 std::vector<TaskNamespace*> ready_to_run_namespaces_; |
135 | |
136 // This set contains all currently running tasks. | |
137 GraphNodeMap running_tasks_; | |
138 | |
139 // Completed tasks not yet collected by origin thread. | |
140 TaskVector completed_tasks_; | |
141 | 122 |
142 ScopedPtrDeque<base::DelegateSimpleThread> workers_; | 123 ScopedPtrDeque<base::DelegateSimpleThread> workers_; |
143 | 124 |
144 DISALLOW_COPY_AND_ASSIGN(Inner); | 125 DISALLOW_COPY_AND_ASSIGN(TaskGraphRunner); |
145 }; | 126 }; |
146 | 127 |
147 WorkerPool::Inner::Inner( | 128 TaskGraphRunner::TaskGraphRunner( |
148 size_t num_threads, const std::string& thread_name_prefix) | 129 size_t num_threads, const std::string& thread_name_prefix) |
149 : lock_(), | 130 : lock_(), |
150 has_ready_to_run_tasks_cv_(&lock_), | 131 has_ready_to_run_tasks_cv_(&lock_), |
132 has_namespaces_with_finished_running_tasks_cv_(&lock_), | |
151 next_thread_index_(0), | 133 next_thread_index_(0), |
152 shutdown_(false) { | 134 shutdown_(false) { |
153 base::AutoLock lock(lock_); | 135 base::AutoLock lock(lock_); |
154 | 136 |
155 while (workers_.size() < num_threads) { | 137 while (workers_.size() < num_threads) { |
156 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( | 138 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( |
157 new base::DelegateSimpleThread( | 139 new base::DelegateSimpleThread( |
158 this, | 140 this, |
159 thread_name_prefix + | 141 thread_name_prefix + |
160 base::StringPrintf( | 142 base::StringPrintf( |
161 "Worker%u", | 143 "Worker%u", |
162 static_cast<unsigned>(workers_.size() + 1)).c_str())); | 144 static_cast<unsigned>(workers_.size() + 1)).c_str())); |
163 worker->Start(); | 145 worker->Start(); |
164 #if defined(OS_ANDROID) || defined(OS_LINUX) | 146 #if defined(OS_ANDROID) || defined(OS_LINUX) |
165 worker->SetThreadPriority(base::kThreadPriority_Background); | 147 worker->SetThreadPriority(base::kThreadPriority_Background); |
166 #endif | 148 #endif |
167 workers_.push_back(worker.Pass()); | 149 workers_.push_back(worker.Pass()); |
168 } | 150 } |
169 } | 151 } |
170 | 152 |
171 WorkerPool::Inner::~Inner() { | 153 TaskGraphRunner::~TaskGraphRunner() { |
172 base::AutoLock lock(lock_); | |
173 | |
174 DCHECK(shutdown_); | |
175 | |
176 DCHECK_EQ(0u, pending_tasks_.size()); | |
177 DCHECK_EQ(0u, ready_to_run_tasks_.size()); | |
178 DCHECK_EQ(0u, running_tasks_.size()); | |
179 DCHECK_EQ(0u, completed_tasks_.size()); | |
180 } | |
181 | |
182 void WorkerPool::Inner::Shutdown() { | |
183 { | 154 { |
184 base::AutoLock lock(lock_); | 155 base::AutoLock lock(lock_); |
185 | 156 |
157 DCHECK_EQ(0u, ready_to_run_namespaces_.size()); | |
158 DCHECK_EQ(0u, namespaces_.size()); | |
159 | |
186 DCHECK(!shutdown_); | 160 DCHECK(!shutdown_); |
187 shutdown_ = true; | 161 shutdown_ = true; |
188 | 162 |
189 // Wake up a worker so it knows it should exit. This will cause all workers | 163 // Wake up a worker so it knows it should exit. This will cause all workers |
190 // to exit as each will wake up another worker before exiting. | 164 // to exit as each will wake up another worker before exiting. |
191 has_ready_to_run_tasks_cv_.Signal(); | 165 has_ready_to_run_tasks_cv_.Signal(); |
192 } | 166 } |
193 | 167 |
194 while (workers_.size()) { | 168 while (workers_.size()) { |
195 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front(); | 169 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front(); |
196 // http://crbug.com/240453 - Join() is considered IO and will block this | 170 // http://crbug.com/240453 - Join() is considered IO and will block this |
197 // thread. See also http://crbug.com/239423 for further ideas. | 171 // thread. See also http://crbug.com/239423 for further ideas. |
198 base::ThreadRestrictions::ScopedAllowIO allow_io; | 172 base::ThreadRestrictions::ScopedAllowIO allow_io; |
199 worker->Join(); | 173 worker->Join(); |
200 } | 174 } |
201 } | 175 } |
202 | 176 |
203 void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { | 177 void TaskGraphRunner::Register(const WorkerPool* worker_pool) { |
204 // It is OK to call SetTaskGraph() after shutdown if |graph| is empty. | 178 base::AutoLock lock(lock_); |
205 DCHECK(graph->empty() || !shutdown_); | |
206 | 179 |
207 GraphNodeMap new_pending_tasks; | 180 DCHECK(namespaces_.find(worker_pool) == namespaces_.end()); |
208 GraphNodeMap new_running_tasks; | 181 linked_ptr<TaskNamespace> task_set = make_linked_ptr(new TaskNamespace()); |
182 namespaces_[worker_pool] = task_set; | |
183 } | |
184 | |
185 void TaskGraphRunner::Unregister(const WorkerPool* worker_pool) { | |
186 base::AutoLock lock(lock_); | |
187 | |
188 DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); | |
189 DCHECK_EQ(0u, namespaces_[worker_pool]->pending_tasks.size()); | |
190 DCHECK_EQ(0u, namespaces_[worker_pool]->ready_to_run_tasks.size()); | |
191 DCHECK_EQ(0u, namespaces_[worker_pool]->running_tasks.size()); | |
192 DCHECK_EQ(0u, namespaces_[worker_pool]->completed_tasks.size()); | |
193 | |
194 namespaces_.erase(worker_pool); | |
195 } | |
196 | |
197 void TaskGraphRunner::WaitForTasksToFinishRunning( | |
198 const WorkerPool* worker_pool) { | |
199 base::AutoLock lock(lock_); | |
200 | |
201 DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); | |
202 TaskNamespace* task_namespace = namespaces_[worker_pool].get(); | |
203 | |
204 while (!has_finished_running_tasks(task_namespace)) | |
205 has_namespaces_with_finished_running_tasks_cv_.Wait(); | |
206 | |
207 // There may be other namespaces that have finished running | |
208 // tasks, so wake up another origin thread. | |
209 has_namespaces_with_finished_running_tasks_cv_.Signal(); | |
210 } | |
211 | |
212 void TaskGraphRunner::SetTaskGraph(const WorkerPool* worker_pool, | |
213 TaskGraph* graph) { | |
214 TaskGraph new_pending_tasks; | |
215 TaskGraph new_running_tasks; | |
209 | 216 |
210 new_pending_tasks.swap(*graph); | 217 new_pending_tasks.swap(*graph); |
211 | 218 |
212 { | 219 { |
213 base::AutoLock lock(lock_); | 220 base::AutoLock lock(lock_); |
214 | 221 |
222 DCHECK(!shutdown_); | |
223 DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); | |
224 TaskNamespace* task_namespace = namespaces_[worker_pool].get(); | |
225 | |
215 // First remove all completed tasks from |new_pending_tasks| and | 226 // First remove all completed tasks from |new_pending_tasks| and |
216 // adjust number of dependencies. | 227 // adjust number of dependencies. |
217 for (TaskVector::iterator it = completed_tasks_.begin(); | 228 for (TaskVector::iterator it = task_namespace->completed_tasks.begin(); |
218 it != completed_tasks_.end(); ++it) { | 229 it != task_namespace->completed_tasks.end(); ++it) { |
219 internal::WorkerPoolTask* task = it->get(); | 230 internal::WorkerPoolTask* task = it->get(); |
220 | 231 |
221 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( | 232 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( |
222 task); | 233 task); |
223 if (node) { | 234 if (node) { |
224 for (internal::GraphNode::Vector::const_iterator it = | 235 for (internal::GraphNode::Vector::const_iterator it = |
225 node->dependents().begin(); | 236 node->dependents().begin(); |
226 it != node->dependents().end(); ++it) { | 237 it != node->dependents().end(); ++it) { |
227 internal::GraphNode* dependent_node = *it; | 238 internal::GraphNode* dependent_node = *it; |
228 dependent_node->remove_dependency(); | 239 dependent_node->remove_dependency(); |
229 } | 240 } |
230 } | 241 } |
231 } | 242 } |
232 | 243 |
233 // Build new running task set. | 244 // Build new running task set. |
234 for (GraphNodeMap::iterator it = running_tasks_.begin(); | 245 for (TaskGraph::iterator it = task_namespace->running_tasks.begin(); |
235 it != running_tasks_.end(); ++it) { | 246 it != task_namespace->running_tasks.end(); ++it) { |
236 internal::WorkerPoolTask* task = it->first; | 247 internal::WorkerPoolTask* task = it->first; |
237 // Transfer scheduled task value from |new_pending_tasks| to | 248 // Transfer scheduled task value from |new_pending_tasks| to |
238 // |new_running_tasks| if currently running. Value must be set to | 249 // |new_running_tasks| if currently running. Value must be set to |
239 // NULL if |new_pending_tasks| doesn't contain task. This does | 250 // NULL if |new_pending_tasks| doesn't contain task. This does |
240 // the right thing in both cases. | 251 // the right thing in both cases. |
241 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); | 252 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); |
242 } | 253 } |
243 | 254 |
244 // Build new "ready to run" tasks queue. | 255 // Build new "ready to run" tasks queue. |
245 ready_to_run_tasks_.clear(); | 256 task_namespace->ready_to_run_tasks.clear(); |
246 for (GraphNodeMap::iterator it = new_pending_tasks.begin(); | 257 for (TaskGraph::iterator it = new_pending_tasks.begin(); |
247 it != new_pending_tasks.end(); ++it) { | 258 it != new_pending_tasks.end(); ++it) { |
248 internal::WorkerPoolTask* task = it->first; | 259 internal::WorkerPoolTask* task = it->first; |
249 DCHECK(task); | 260 DCHECK(task); |
250 internal::GraphNode* node = it->second; | 261 internal::GraphNode* node = it->second; |
251 | 262 |
252 // Completed tasks should not exist in |new_pending_tasks|. | 263 // Completed tasks should not exist in |new_pending_tasks|. |
253 DCHECK(!task->HasFinishedRunning()); | 264 DCHECK(!task->HasFinishedRunning()); |
254 | 265 |
255 // Call DidSchedule() to indicate that this task has been scheduled. | 266 // Call DidSchedule() to indicate that this task has been scheduled. |
256 // Note: This is only for debugging purposes. | 267 // Note: This is only for debugging purposes. |
257 task->DidSchedule(); | 268 task->DidSchedule(); |
258 | 269 |
259 if (!node->num_dependencies()) | 270 if (!node->num_dependencies()) |
260 ready_to_run_tasks_.push_back(node); | 271 task_namespace->ready_to_run_tasks.push_back(node); |
261 | 272 |
262 // Erase the task from old pending tasks. | 273 // Erase the task from old pending tasks. |
263 pending_tasks_.erase(task); | 274 task_namespace->pending_tasks.erase(task); |
264 } | 275 } |
265 | 276 |
266 // Rearrange the elements in |ready_to_run_tasks_| in such a way that | 277 // Rearrange the elements in |ready_to_run_tasks| in such a way that |
267 // they form a heap. | 278 // they form a heap. |
268 std::make_heap(ready_to_run_tasks_.begin(), | 279 std::make_heap(task_namespace->ready_to_run_tasks.begin(), |
269 ready_to_run_tasks_.end(), | 280 task_namespace->ready_to_run_tasks.end(), |
270 CompareTaskPriority); | 281 CompareTaskPriority); |
271 | 282 |
272 completed_tasks_.reserve(completed_tasks_.size() + pending_tasks_.size()); | 283 task_namespace->completed_tasks.reserve( |
284 task_namespace->completed_tasks.size() + | |
285 task_namespace->pending_tasks.size()); | |
273 | 286 |
274 // The items left in |pending_tasks_| need to be canceled. | 287 // The items left in |pending_tasks| need to be canceled. |
275 for (GraphNodeMap::const_iterator it = pending_tasks_.begin(); | 288 for (TaskGraph::const_iterator it = task_namespace->pending_tasks.begin(); |
276 it != pending_tasks_.end(); | 289 it != task_namespace->pending_tasks.end(); ++it) { |
277 ++it) { | 290 task_namespace->completed_tasks.push_back(it->first); |
278 completed_tasks_.push_back(it->first); | |
279 } | 291 } |
280 | 292 |
281 // Swap task sets. | 293 // Swap task sets. |
282 // Note: old tasks are intentionally destroyed after releasing |lock_|. | 294 // Note: old tasks are intentionally destroyed after releasing |lock_|. |
283 pending_tasks_.swap(new_pending_tasks); | 295 task_namespace->pending_tasks.swap(new_pending_tasks); |
284 running_tasks_.swap(new_running_tasks); | 296 task_namespace->running_tasks.swap(new_running_tasks); |
285 | 297 |
286 // If |ready_to_run_tasks_| is empty, it means we either have | 298 // If |ready_to_run_tasks| is empty, it means we either have |
287 // running tasks, or we have no pending tasks. | 299 // running tasks, or we have no pending tasks. |
288 DCHECK(!ready_to_run_tasks_.empty() || | 300 DCHECK(!task_namespace->ready_to_run_tasks.empty() || |
289 (pending_tasks_.empty() || !running_tasks_.empty())); | 301 (task_namespace->pending_tasks.empty() || |
302 !task_namespace->running_tasks.empty())); | |
303 | |
304 // Build new "ready to run" task namespaces queue. | |
305 ready_to_run_namespaces_.clear(); | |
306 for (TaskNamespaceMap::iterator it = namespaces_.begin(); | |
307 it != namespaces_.end(); ++it) { | |
308 if (!it->second->ready_to_run_tasks.empty()) | |
309 ready_to_run_namespaces_.push_back(it->second.get()); | |
310 } | |
311 | |
312 // Rearrange the task namespaces in |ready_to_run_namespaces_| | |
313 // in such a way that they form a heap. | |
314 std::make_heap(ready_to_run_namespaces_.begin(), | |
315 ready_to_run_namespaces_.end(), | |
316 CompareTaskNamespacePriority); | |
290 | 317 |
291 // If there is more work available, wake up worker thread. | 318 // If there is more work available, wake up worker thread. |
292 if (!ready_to_run_tasks_.empty()) | 319 if (!ready_to_run_namespaces_.empty()) |
293 has_ready_to_run_tasks_cv_.Signal(); | 320 has_ready_to_run_tasks_cv_.Signal(); |
294 } | 321 } |
295 } | 322 } |
296 | 323 |
297 void WorkerPool::Inner::CollectCompletedTasks(TaskVector* completed_tasks) { | 324 void TaskGraphRunner::CollectCompletedTasks( |
325 const WorkerPool* worker_pool, TaskVector* completed_tasks) { | |
298 base::AutoLock lock(lock_); | 326 base::AutoLock lock(lock_); |
299 | 327 |
300 DCHECK_EQ(0u, completed_tasks->size()); | 328 DCHECK_EQ(0u, completed_tasks->size()); |
301 completed_tasks->swap(completed_tasks_); | 329 DCHECK(namespaces_.find(worker_pool) != namespaces_.end()); |
330 completed_tasks->swap(namespaces_[worker_pool]->completed_tasks); | |
302 } | 331 } |
303 | 332 |
304 void WorkerPool::Inner::Run() { | 333 void TaskGraphRunner::Run() { |
305 base::AutoLock lock(lock_); | 334 base::AutoLock lock(lock_); |
306 | 335 |
307 // Get a unique thread index. | 336 // Get a unique thread index. |
308 int thread_index = next_thread_index_++; | 337 int thread_index = next_thread_index_++; |
309 | 338 |
310 while (true) { | 339 while (true) { |
311 if (ready_to_run_tasks_.empty()) { | 340 if (ready_to_run_namespaces_.empty()) { |
312 // Exit when shutdown is set and no more tasks are pending. | 341 // Exit when shutdown is set and no more tasks are pending. |
313 if (shutdown_ && pending_tasks_.empty()) | 342 if (shutdown_) |
314 break; | 343 break; |
315 | 344 |
316 // Wait for more tasks. | 345 // Wait for more tasks. |
317 has_ready_to_run_tasks_cv_.Wait(); | 346 has_ready_to_run_tasks_cv_.Wait(); |
318 continue; | 347 continue; |
319 } | 348 } |
320 | 349 |
321 // Take top priority task from |ready_to_run_tasks_|. | 350 // Take top priority TaskNamespace from |ready_to_run_namespaces_|. |
322 std::pop_heap(ready_to_run_tasks_.begin(), | 351 std::pop_heap(ready_to_run_namespaces_.begin(), |
323 ready_to_run_tasks_.end(), | 352 ready_to_run_namespaces_.end(), |
353 CompareTaskNamespacePriority); | |
354 TaskNamespace* task_namespace = ready_to_run_namespaces_.back(); | |
355 ready_to_run_namespaces_.pop_back(); | |
356 DCHECK(!task_namespace->ready_to_run_tasks.empty()); | |
357 | |
358 // Take top priority task from |ready_to_run_tasks|. | |
359 std::pop_heap(task_namespace->ready_to_run_tasks.begin(), | |
360 task_namespace->ready_to_run_tasks.end(), | |
324 CompareTaskPriority); | 361 CompareTaskPriority); |
325 scoped_refptr<internal::WorkerPoolTask> task( | 362 scoped_refptr<internal::WorkerPoolTask> task( |
326 ready_to_run_tasks_.back()->task()); | 363 task_namespace->ready_to_run_tasks.back()->task()); |
327 ready_to_run_tasks_.pop_back(); | 364 task_namespace->ready_to_run_tasks.pop_back(); |
328 | 365 |
329 // Move task from |pending_tasks_| to |running_tasks_|. | 366 // Add task namespace back to |ready_to_run_namespaces_| if not |
330 DCHECK(pending_tasks_.contains(task.get())); | 367 // empty after taking top priority task. |
331 DCHECK(!running_tasks_.contains(task.get())); | 368 if (!task_namespace->ready_to_run_tasks.empty()) { |
332 running_tasks_.set(task.get(), pending_tasks_.take_and_erase(task.get())); | 369 ready_to_run_namespaces_.push_back(task_namespace); |
370 std::push_heap(ready_to_run_namespaces_.begin(), | |
371 ready_to_run_namespaces_.end(), | |
372 CompareTaskNamespacePriority); | |
373 } | |
374 | |
375 // Move task from |pending_tasks| to |running_tasks|. | |
376 DCHECK(task_namespace->pending_tasks.contains(task.get())); | |
377 DCHECK(!task_namespace->running_tasks.contains(task.get())); | |
378 task_namespace->running_tasks.set( | |
379 task.get(), | |
380 task_namespace->pending_tasks.take_and_erase(task.get())); | |
333 | 381 |
334 // There may be more work available, so wake up another worker thread. | 382 // There may be more work available, so wake up another worker thread. |
335 has_ready_to_run_tasks_cv_.Signal(); | 383 has_ready_to_run_tasks_cv_.Signal(); |
336 | 384 |
337 // Call WillRun() before releasing |lock_| and running task. | 385 // Call WillRun() before releasing |lock_| and running task. |
338 task->WillRun(); | 386 task->WillRun(); |
339 | 387 |
340 { | 388 { |
341 base::AutoUnlock unlock(lock_); | 389 base::AutoUnlock unlock(lock_); |
342 | 390 |
343 task->RunOnWorkerThread(thread_index); | 391 task->RunOnWorkerThread(thread_index); |
344 } | 392 } |
345 | 393 |
346 // This will mark task as finished running. | 394 // This will mark task as finished running. |
347 task->DidRun(); | 395 task->DidRun(); |
348 | 396 |
349 // Now iterate over all dependents to remove dependency and check | 397 // Now iterate over all dependents to remove dependency and check |
350 // if they are ready to run. | 398 // if they are ready to run. |
351 scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase( | 399 scoped_ptr<internal::GraphNode> node = |
352 task.get()); | 400 task_namespace->running_tasks.take_and_erase(task.get()); |
353 if (node) { | 401 if (node) { |
402 bool ready_to_run_namespaces_has_heap_properties = true; | |
403 | |
354 for (internal::GraphNode::Vector::const_iterator it = | 404 for (internal::GraphNode::Vector::const_iterator it = |
355 node->dependents().begin(); | 405 node->dependents().begin(); |
356 it != node->dependents().end(); ++it) { | 406 it != node->dependents().end(); ++it) { |
357 internal::GraphNode* dependent_node = *it; | 407 internal::GraphNode* dependent_node = *it; |
358 | 408 |
359 dependent_node->remove_dependency(); | 409 dependent_node->remove_dependency(); |
360 // Task is ready if it has no dependencies. Add it to | 410 // Task is ready if it has no dependencies. Add it to |
361 // |ready_to_run_tasks_|. | 411 // |ready_to_run_tasks|. |
362 if (!dependent_node->num_dependencies()) { | 412 if (!dependent_node->num_dependencies()) { |
363 ready_to_run_tasks_.push_back(dependent_node); | 413 bool was_empty = task_namespace->ready_to_run_tasks.empty(); |
364 std::push_heap(ready_to_run_tasks_.begin(), | 414 task_namespace->ready_to_run_tasks.push_back(dependent_node); |
365 ready_to_run_tasks_.end(), | 415 std::push_heap(task_namespace->ready_to_run_tasks.begin(), |
416 task_namespace->ready_to_run_tasks.end(), | |
366 CompareTaskPriority); | 417 CompareTaskPriority); |
418 // Task namespace is ready if it has at least one ready | |
419 // to run task. Add it to |ready_to_run_namespaces_| if | |
420 // it just became ready. |
421 if (was_empty) { | |
422 DCHECK(std::find(ready_to_run_namespaces_.begin(), | |
423 ready_to_run_namespaces_.end(), | |
424 task_namespace) == | |
425 ready_to_run_namespaces_.end()); | |
426 ready_to_run_namespaces_.push_back(task_namespace); | |
427 } | |
428 ready_to_run_namespaces_has_heap_properties = false; | |
367 } | 429 } |
368 } | 430 } |
431 | |
432 // Rearrange the task namespaces in |ready_to_run_namespaces_| | |
433 // in such a way that they yet again form a heap. | |
434 if (!ready_to_run_namespaces_has_heap_properties) { | |
435 std::make_heap(ready_to_run_namespaces_.begin(), | |
436 ready_to_run_namespaces_.end(), | |
437 CompareTaskNamespacePriority); | |
438 } | |
369 } | 439 } |
370 | 440 |
371 // Finally add task to |completed_tasks_|. | 441 // Finally add task to |completed_tasks|. |
372 completed_tasks_.push_back(task); | 442 task_namespace->completed_tasks.push_back(task); |
443 | |
444 // If namespace has finished running all tasks, wake up origin thread. | |
445 if (has_finished_running_tasks(task_namespace)) | |
446 has_namespaces_with_finished_running_tasks_cv_.Signal(); | |
373 } | 447 } |
374 | 448 |
375 // We noticed we should exit. Wake up the next worker so it knows it should | 449 // We noticed we should exit. Wake up the next worker so it knows it should |
376 // exit as well (because the Shutdown() code only signals once). | 450 // exit as well (because the Shutdown() code only signals once). |
377 has_ready_to_run_tasks_cv_.Signal(); | 451 has_ready_to_run_tasks_cv_.Signal(); |
378 } | 452 } |
379 | 453 |
380 WorkerPool::WorkerPool(size_t num_threads, | 454 class CC_EXPORT CompositorRasterTaskGraphRunner |
381 const std::string& thread_name_prefix) | 455 : public TaskGraphRunner { |
382 : in_dispatch_completion_callbacks_(false), | 456 public: |
383 inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) { | 457 CompositorRasterTaskGraphRunner() : TaskGraphRunner( |
458 WorkerPool::GetNumRasterThreads(), "CompositorRaster") { | |
459 } | |
460 }; | |
461 | |
462 base::LazyInstance<CompositorRasterTaskGraphRunner> | |
463 g_task_graph_runner = LAZY_INSTANCE_INITIALIZER; | |
464 | |
465 } // namespace | |
466 | |
467 namespace internal { | |
468 | |
469 WorkerPoolTask::WorkerPoolTask() | |
470 : did_schedule_(false), | |
471 did_run_(false), | |
472 did_complete_(false) { | |
473 } | |
474 | |
475 WorkerPoolTask::~WorkerPoolTask() { | |
476 DCHECK_EQ(did_schedule_, did_complete_); | |
477 DCHECK(!did_run_ || did_schedule_); | |
478 DCHECK(!did_run_ || did_complete_); | |
479 } | |
480 | |
481 void WorkerPoolTask::DidSchedule() { | |
482 DCHECK(!did_complete_); | |
483 did_schedule_ = true; | |
484 } | |
485 | |
486 void WorkerPoolTask::WillRun() { | |
487 DCHECK(did_schedule_); | |
488 DCHECK(!did_complete_); | |
489 DCHECK(!did_run_); | |
490 } | |
491 | |
492 void WorkerPoolTask::DidRun() { | |
493 did_run_ = true; | |
494 } | |
495 | |
496 void WorkerPoolTask::WillComplete() { | |
497 DCHECK(!did_complete_); | |
498 } | |
499 | |
500 void WorkerPoolTask::DidComplete() { | |
501 DCHECK(did_schedule_); | |
502 DCHECK(!did_complete_); | |
503 did_complete_ = true; | |
504 } | |
505 | |
506 bool WorkerPoolTask::HasFinishedRunning() const { | |
507 return did_run_; | |
508 } | |
509 | |
510 bool WorkerPoolTask::HasCompleted() const { | |
511 return did_complete_; | |
512 } | |
513 | |
514 GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority) | |
515 : task_(task), | |
516 priority_(priority), | |
517 num_dependencies_(0) { | |
518 } | |
519 | |
520 GraphNode::~GraphNode() { | |
521 } | |
522 | |
523 } // namespace internal | |
524 | |
525 WorkerPool::WorkerPool() : in_dispatch_completion_callbacks_(false) { | |
526 g_task_graph_runner.Pointer()->Register(this); | |
384 } | 527 } |
385 | 528 |
386 WorkerPool::~WorkerPool() { | 529 WorkerPool::~WorkerPool() { |
530 g_task_graph_runner.Pointer()->Unregister(this); | |
387 } | 531 } |
388 | 532 |
389 void WorkerPool::Shutdown() { | 533 void WorkerPool::Shutdown() { |
390 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); | 534 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); |
391 | 535 |
392 DCHECK(!in_dispatch_completion_callbacks_); | 536 DCHECK(!in_dispatch_completion_callbacks_); |
393 | 537 |
394 inner_->Shutdown(); | 538 g_task_graph_runner.Pointer()->WaitForTasksToFinishRunning(this); |
395 } | |
396 | |
397 void WorkerPool::SetTaskGraph(TaskGraph* graph) { | |
398 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", | |
399 "num_tasks", graph->size()); | |
400 | |
401 DCHECK(!in_dispatch_completion_callbacks_); | |
402 | |
403 inner_->SetTaskGraph(graph); | |
404 } | 539 } |
405 | 540 |
406 void WorkerPool::CheckForCompletedWorkerTasks() { | 541 void WorkerPool::CheckForCompletedWorkerTasks() { |
407 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedWorkerTasks"); | 542 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedWorkerTasks"); |
408 | 543 |
409 DCHECK(!in_dispatch_completion_callbacks_); | 544 DCHECK(!in_dispatch_completion_callbacks_); |
410 | 545 |
411 TaskVector completed_tasks; | 546 TaskVector completed_tasks; |
412 inner_->CollectCompletedTasks(&completed_tasks); | 547 g_task_graph_runner.Pointer()->CollectCompletedTasks(this, &completed_tasks); |
413 ProcessCompletedTasks(completed_tasks); | 548 ProcessCompletedTasks(completed_tasks); |
414 } | 549 } |
415 | 550 |
416 void WorkerPool::ProcessCompletedTasks( | 551 void WorkerPool::ProcessCompletedTasks( |
417 const TaskVector& completed_tasks) { | 552 const TaskVector& completed_tasks) { |
418 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", | 553 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", |
419 "completed_task_count", completed_tasks.size()); | 554 "completed_task_count", completed_tasks.size()); |
420 | 555 |
421 // Worker pool instance is not reentrant while processing completed tasks. | 556 // Worker pool instance is not reentrant while processing completed tasks. |
422 in_dispatch_completion_callbacks_ = true; | 557 in_dispatch_completion_callbacks_ = true; |
423 | 558 |
424 for (TaskVector::const_iterator it = completed_tasks.begin(); | 559 for (TaskVector::const_iterator it = completed_tasks.begin(); |
425 it != completed_tasks.end(); | 560 it != completed_tasks.end(); |
426 ++it) { | 561 ++it) { |
427 internal::WorkerPoolTask* task = it->get(); | 562 internal::WorkerPoolTask* task = it->get(); |
428 | 563 |
429 task->WillComplete(); | 564 task->WillComplete(); |
430 task->CompleteOnOriginThread(); | 565 task->CompleteOnOriginThread(); |
431 task->DidComplete(); | 566 task->DidComplete(); |
432 } | 567 } |
433 | 568 |
434 in_dispatch_completion_callbacks_ = false; | 569 in_dispatch_completion_callbacks_ = false; |
435 } | 570 } |
436 | 571 |
572 | |
573 void WorkerPool::SetTaskGraph(TaskGraph* graph) { | |
reveman 2014/01/16 08:00:44: not sure why you move this to the bottom. please k
sohanjg 2014/01/16 08:56:49: Done.
| |
574 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", | |
575 "num_tasks", graph->size()); | |
576 | |
577 DCHECK(!in_dispatch_completion_callbacks_); | |
578 | |
579 g_task_graph_runner.Pointer()->SetTaskGraph(this, graph); | |
580 } | |
581 | |
437 } // namespace cc | 582 } // namespace cc |
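
The inverted comparators in this CL (CompareTaskPriority and CompareTaskNamespacePriority) rely on std::make_heap's max-heap semantics: because the comparator treats a numerically higher priority value as "less", the numerically lowest priority ends up at .front(), and std::pop_heap moves it to .back(), which is why the Run() loop reads .back(). A minimal standalone sketch of that mechanic, assuming nothing from cc/ (FakeNode and the priority values are made up for illustration):

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

namespace {

// Stand-in for internal::GraphNode; only the fields the comparator needs.
struct FakeNode {
  unsigned priority;      // Lower value == runs first.
  size_t num_dependents;  // Stand-in for dependents().size().
};

// Mirrors CompareTaskPriority: a is "less urgent" than b when its numeric
// priority is higher, so the heap's max element is the most urgent task.
bool CompareFakePriority(const FakeNode* a, const FakeNode* b) {
  if (a->priority != b->priority)
    return a->priority > b->priority;
  return a->num_dependents < b->num_dependents;
}

}  // namespace

int main() {
  FakeNode urgent = {0u, 1u};
  FakeNode normal = {1u, 5u};
  FakeNode idle = {2u, 0u};

  std::vector<FakeNode*> ready_to_run;
  ready_to_run.push_back(&idle);
  ready_to_run.push_back(&urgent);
  ready_to_run.push_back(&normal);

  std::make_heap(ready_to_run.begin(), ready_to_run.end(),
                 CompareFakePriority);
  assert(ready_to_run.front() == &urgent);  // Lowest priority value wins.

  std::pop_heap(ready_to_run.begin(), ready_to_run.end(),
                CompareFakePriority);
  assert(ready_to_run.back() == &urgent);   // pop_heap moves it to .back().
  ready_to_run.pop_back();
  return 0;
}
```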
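
The reworked Run() loop selects work in two stages: pop the most urgent namespace from |ready_to_run_namespaces_|, pop the most urgent task from that namespace's |ready_to_run_tasks|, and re-push the namespace only if it still has ready tasks. A sketch of just that selection order, with plain ints standing in for GraphNode priorities and FakeNamespace standing in for TaskNamespace (names and values are illustrative, not from the CL):

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

namespace {

// Lower value == more urgent, matching the CL's convention that a
// numerically lower priority runs first.
bool LessUrgent(int a, int b) { return a > b; }

// Stand-in for TaskNamespace: just a heap of ready task priorities.
struct FakeNamespace {
  std::vector<int> ready_to_run_tasks;
};

// Mirrors CompareTaskNamespacePriority: order namespaces by the priority
// of the task at the front of their own heap.
bool LessUrgentNamespace(const FakeNamespace* a, const FakeNamespace* b) {
  return LessUrgent(a->ready_to_run_tasks.front(),
                    b->ready_to_run_tasks.front());
}

}  // namespace

int main() {
  FakeNamespace ns1, ns2;
  ns1.ready_to_run_tasks = {3, 1, 4};
  ns2.ready_to_run_tasks = {2, 5};
  std::make_heap(ns1.ready_to_run_tasks.begin(),
                 ns1.ready_to_run_tasks.end(), LessUrgent);
  std::make_heap(ns2.ready_to_run_tasks.begin(),
                 ns2.ready_to_run_tasks.end(), LessUrgent);

  std::vector<FakeNamespace*> ready_namespaces = {&ns1, &ns2};
  std::make_heap(ready_namespaces.begin(), ready_namespaces.end(),
                 LessUrgentNamespace);

  while (!ready_namespaces.empty()) {
    // Take the most urgent namespace...
    std::pop_heap(ready_namespaces.begin(), ready_namespaces.end(),
                  LessUrgentNamespace);
    FakeNamespace* ns = ready_namespaces.back();
    ready_namespaces.pop_back();

    // ...then the most urgent task inside it.
    std::pop_heap(ns->ready_to_run_tasks.begin(),
                  ns->ready_to_run_tasks.end(), LessUrgent);
    int task = ns->ready_to_run_tasks.back();
    ns->ready_to_run_tasks.pop_back();
    std::printf("running task with priority %d\n", task);

    // Re-insert the namespace only if it still has ready tasks.
    if (!ns->ready_to_run_tasks.empty()) {
      ready_namespaces.push_back(ns);
      std::push_heap(ready_namespaces.begin(), ready_namespaces.end(),
                     LessUrgentNamespace);
    }
  }
  return 0;
}
```

Run as-is, it prints the priorities in ascending order (1 through 5) even though they are spread across two namespaces, which is the cross-namespace ordering the namespace heap is meant to provide.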
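
WaitForTasksToFinishRunning() waits on one condition variable shared by all origin threads and, once its own namespace is idle, calls Signal() one more time so a different waiting origin thread can re-check its own predicate (Signal wakes a single waiter, unlike Broadcast). A sketch of that handshake using std::mutex/std::condition_variable in place of base::Lock/base::ConditionVariable; the Runner and Namespace types and the task bookkeeping are simplified stand-ins, not the CL's code:

```cpp
#include <chrono>
#include <condition_variable>
#include <map>
#include <mutex>
#include <thread>

class Runner {
 public:
  // Pretend a task for |key|'s namespace was scheduled and picked up.
  void AddRunningTask(const void* key) {
    std::lock_guard<std::mutex> lock(lock_);
    ++namespaces_[key].running_tasks;
  }

  // Called by a worker when it finishes a task for |key|'s namespace.
  void OnTaskFinished(const void* key) {
    std::lock_guard<std::mutex> lock(lock_);
    Namespace* ns = &namespaces_[key];
    if (--ns->running_tasks == 0 && ns->pending_tasks == 0)
      finished_cv_.notify_one();
  }

  // Mirrors WaitForTasksToFinishRunning(): wait until |key|'s namespace is
  // idle, then pass the single wakeup along to another possible waiter.
  void WaitForNamespace(const void* key) {
    std::unique_lock<std::mutex> lock(lock_);
    Namespace* ns = &namespaces_[key];
    while (ns->pending_tasks || ns->running_tasks)
      finished_cv_.wait(lock);
    finished_cv_.notify_one();
  }

 private:
  struct Namespace {
    int pending_tasks = 0;
    int running_tasks = 0;
  };

  std::mutex lock_;
  std::condition_variable finished_cv_;
  std::map<const void*, Namespace> namespaces_;
};

int main() {
  Runner runner;
  int pool = 0;  // Any stable address works as a namespace key.

  runner.AddRunningTask(&pool);
  std::thread worker([&runner, &pool] {
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    runner.OnTaskFinished(&pool);
  });

  runner.WaitForNamespace(&pool);  // Returns once the namespace is idle.
  worker.join();
  return 0;
}
```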
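
With this CL every WorkerPool shares one lazily created runner through base::LazyInstance<CompositorRasterTaskGraphRunner>, registering in its constructor and unregistering in its destructor. A sketch of that sharing pattern using a function-local static as the plain-C++ stand-in for LazyInstance (FakeTaskGraphRunner, FakeWorkerPool, and the thread count of 4 are made up; the real CL sizes the runner with WorkerPool::GetNumRasterThreads()):

```cpp
#include <cstddef>
#include <string>

// Stand-in with the same shape as the TaskGraphRunner interface in the CL;
// the bodies are omitted, this only illustrates the sharing pattern.
class FakeTaskGraphRunner {
 public:
  FakeTaskGraphRunner(size_t /* num_threads */,
                      const std::string& /* thread_name_prefix */) {}
  void Register(const void* /* worker_pool */) {}
  void Unregister(const void* /* worker_pool */) {}
};

// Constructed on first use, shared process-wide; thread-safe in C++11.
FakeTaskGraphRunner* GetSharedRunner() {
  static FakeTaskGraphRunner runner(4 /* made-up thread count */,
                                    "CompositorRaster");
  return &runner;
}

class FakeWorkerPool {
 public:
  FakeWorkerPool() { GetSharedRunner()->Register(this); }
  ~FakeWorkerPool() { GetSharedRunner()->Unregister(this); }
};

int main() {
  // Two pools, one shared runner: both register against the same instance.
  FakeWorkerPool pool_a;
  FakeWorkerPool pool_b;
  return 0;
}
```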