Chromium Code Reviews

Side by Side Diff: cc/resources/worker_pool.cc

Issue 73923003: Shared Raster Worker Threads (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Code review changes (created 6 years, 11 months ago)
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/resources/worker_pool.h" 5 #include "cc/resources/worker_pool.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/bind.h" 9 #include "base/bind.h"
10 #include "base/containers/hash_tables.h" 10 #include "base/containers/hash_tables.h"
11 #include "base/debug/trace_event.h" 11 #include "base/debug/trace_event.h"
12 #include "base/lazy_instance.h"
13 #include "base/memory/linked_ptr.h"
12 #include "base/strings/stringprintf.h" 14 #include "base/strings/stringprintf.h"
13 #include "base/synchronization/condition_variable.h" 15 #include "base/synchronization/condition_variable.h"
14 #include "base/threading/simple_thread.h" 16 #include "base/threading/simple_thread.h"
15 #include "base/threading/thread_restrictions.h" 17 #include "base/threading/thread_restrictions.h"
16 #include "cc/base/scoped_ptr_deque.h" 18 #include "cc/base/scoped_ptr_deque.h"
19 #include "cc/base/switches.h"
17 20
18 namespace cc { 21 namespace cc {
19 22
20 namespace internal { 23 namespace {
21 24
22 WorkerPoolTask::WorkerPoolTask() 25 // TaskGraphRunners can process task graphs from multiple
23 : did_schedule_(false), 26 // WorkerPool instances. All members are guarded by |lock_|.
24 did_run_(false), 27 class TaskGraphRunner : public base::DelegateSimpleThread::Delegate {
25 did_complete_(false) { 28 public:
26 } 29 typedef WorkerPool::TaskGraph TaskGraph;
30 typedef WorkerPool::TaskVector TaskVector;
27 31
28 WorkerPoolTask::~WorkerPoolTask() { 32 TaskGraphRunner(size_t num_threads, const std::string& thread_name_prefix);
29 DCHECK_EQ(did_schedule_, did_complete_); 33 virtual ~TaskGraphRunner();
30 DCHECK(!did_run_ || did_schedule_);
31 DCHECK(!did_run_ || did_complete_);
32 }
33 34
34 void WorkerPoolTask::DidSchedule() { 35 void Register(const WorkerPool* worker_pool);
35 DCHECK(!did_complete_); 36 void Unregister(const WorkerPool* worker_pool);
36 did_schedule_ = true;
37 }
38
39 void WorkerPoolTask::WillRun() {
40 DCHECK(did_schedule_);
41 DCHECK(!did_complete_);
42 DCHECK(!did_run_);
43 }
44
45 void WorkerPoolTask::DidRun() {
46 did_run_ = true;
47 }
48
49 void WorkerPoolTask::WillComplete() {
50 DCHECK(!did_complete_);
51 }
52
53 void WorkerPoolTask::DidComplete() {
54 DCHECK(did_schedule_);
55 DCHECK(!did_complete_);
56 did_complete_ = true;
57 }
58
59 bool WorkerPoolTask::HasFinishedRunning() const {
60 return did_run_;
61 }
62
63 bool WorkerPoolTask::HasCompleted() const {
64 return did_complete_;
65 }
66
67 GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority)
68 : task_(task),
69 priority_(priority),
70 num_dependencies_(0) {
71 }
72
73 GraphNode::~GraphNode() {
74 }
75
76 } // namespace internal
77
78 // Internal to the worker pool. Any data or logic that needs to be
79 // shared between threads lives in this class. All members are guarded
80 // by |lock_|.
81 class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
82 public:
83 Inner(size_t num_threads, const std::string& thread_name_prefix);
84 virtual ~Inner();
85
86 void Shutdown();
87
88 // Schedule running of tasks in |graph|. Tasks previously scheduled but 37 // Schedule running of tasks in |graph|. Tasks previously scheduled but
89 // no longer needed will be canceled unless already running. Canceled 38 // no longer needed will be canceled unless already running. Canceled
90 // tasks are moved to |completed_tasks_| without being run. The result 39 // tasks are moved to |completed_tasks| without being run. The result
91 // is that once scheduled, a task is guaranteed to end up in the 40 // is that once scheduled, a task is guaranteed to end up in the
92 // |completed_tasks_| queue even if they later get canceled by another 41 // |completed_tasks| queue even if it later gets canceled by another
93 // call to SetTaskGraph(). 42 // call to SetTaskGraph().
94 void SetTaskGraph(TaskGraph* graph); 43 void SetTaskGraph(const WorkerPool* worker_pool, TaskGraph* graph);
44
45 // Wait for all scheduled tasks to finish running.
46 void WaitForTasksToFinishRunning(const WorkerPool* worker_pool);
95 47
96 // Collect all completed tasks in |completed_tasks|. 48 // Collect all completed tasks in |completed_tasks|.
97 void CollectCompletedTasks(TaskVector* completed_tasks); 49 void CollectCompletedTasks(const WorkerPool* worker_pool,
50 TaskVector* completed_tasks);
98 51
99 private: 52 private:
100 static bool CompareTaskPriority(const internal::GraphNode* a, 53 static bool CompareTaskPriority(const internal::GraphNode* a,
101 const internal::GraphNode* b) { 54 const internal::GraphNode* b) {
102 // In this system, numerically lower priority is run first. 55 // In this system, numerically lower priority is run first.
103 if (a->priority() != b->priority()) 56 if (a->priority() != b->priority())
104 return a->priority() > b->priority(); 57 return a->priority() > b->priority();
105 58
106 // Run task with most dependents first when priority is the same. 59 // Run task with most dependents first when priority is the same.
107 return a->dependents().size() < b->dependents().size(); 60 return a->dependents().size() < b->dependents().size();
108 } 61 }
109 62
63 struct TaskNamespace {
64 // This set contains all pending tasks.
65 TaskGraph pending_tasks;
66 // This set contains all currently running tasks.
67 TaskGraph running_tasks;
68 // Completed tasks not yet collected by origin thread.
69 TaskVector completed_tasks;
70 // Ordered set of tasks that are ready to run.
71 internal::GraphNode::Vector ready_to_run_tasks;
72 };
73
74 static bool CompareTaskNamespacePriority(const TaskNamespace* a,
75 const TaskNamespace* b) {
76 DCHECK(!a->ready_to_run_tasks.empty());
77 DCHECK(!b->ready_to_run_tasks.empty());
78
79 // Compare based on priority of the ready_to_run_tasks heap.
80 return CompareTaskPriority(a->ready_to_run_tasks.back(),
81 b->ready_to_run_tasks.back());
reveman 2014/01/07 17:24:06 you should be using .front() here as Vlad pointed out.
sohanjg 2014/01/08 05:55:43 Done. I missed that.
82 }
83
84 typedef std::map<const WorkerPool*, linked_ptr<TaskNamespace> >
85 TaskNamespaceMap;
86
110 // Overridden from base::DelegateSimpleThread: 87 // Overridden from base::DelegateSimpleThread:
111 virtual void Run() OVERRIDE; 88 virtual void Run() OVERRIDE;
112 89
90 inline bool has_finished_running_tasks(TaskNamespace* task_namespace) {
91 return (task_namespace->pending_tasks.empty() &&
92 task_namespace->running_tasks.empty());
93 }
94
113 // This lock protects all members of this class except 95 // This lock protects all members of this class except
114 // |worker_pool_on_origin_thread_|. Do not read or modify anything 96 // |worker_pool_on_origin_thread_|. Do not read or modify anything
115 // without holding this lock. Do not block while holding this lock. 97 // without holding this lock. Do not block while holding this lock.
116 mutable base::Lock lock_; 98 mutable base::Lock lock_;
117 99
118 // Condition variable that is waited on by worker threads until new 100 // Condition variable that is waited on by worker threads until new
119 // tasks are ready to run or shutdown starts. 101 // tasks are ready to run or shutdown starts.
120 base::ConditionVariable has_ready_to_run_tasks_cv_; 102 base::ConditionVariable has_ready_to_run_tasks_cv_;
121 103
104 // Condition variable that is waited on by origin threads until a
105 // namespace has finished running all associated tasks.
106 base::ConditionVariable has_namespaces_with_finished_running_tasks_cv_;
107
122 // Provides each running thread loop with a unique index. First thread 108 // Provides each running thread loop with a unique index. First thread
123 // loop index is 0. 109 // loop index is 0.
124 unsigned next_thread_index_; 110 unsigned next_thread_index_;
125 111
126 // Set during shutdown. Tells workers to exit when no more tasks 112 // Set during shutdown. Tells workers to exit when no more tasks
127 // are pending. 113 // are pending.
128 bool shutdown_; 114 bool shutdown_;
129 115
130 // This set contains all pending tasks. 116 // This set contains all registered namespaces.
131 GraphNodeMap pending_tasks_; 117 TaskNamespaceMap namespaces_;
132 118
133 // Priority queue containing tasks that are ready to run. 119 // Ordered set of tasks namespaces that have ready to run tasks.
134 internal::GraphNode::Vector ready_to_run_tasks_; 120 std::vector<TaskNamespace*> ready_to_run_namespaces_;
135
136 // This set contains all currently running tasks.
137 GraphNodeMap running_tasks_;
138
139 // Completed tasks not yet collected by origin thread.
140 TaskVector completed_tasks_;
141 121
142 ScopedPtrDeque<base::DelegateSimpleThread> workers_; 122 ScopedPtrDeque<base::DelegateSimpleThread> workers_;
143 123
144 DISALLOW_COPY_AND_ASSIGN(Inner); 124 DISALLOW_COPY_AND_ASSIGN(TaskGraphRunner);
145 }; 125 };
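
The two comparators above feed std::make_heap/std::push_heap/std::pop_heap, so the most urgent entry ends up at front() of the heapified vector (which is what the reviewer's later note about using .front() instead of .back() is getting at). Below is a minimal, self-contained sketch in plain C++ of how the inverted comparison makes the numerically lowest priority come out first; the Task struct and sample priorities are illustrative stand-ins, not the cc/ types.

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

struct Task {
  unsigned priority;      // Numerically lower priority runs first.
  size_t num_dependents;  // More dependents wins on a priority tie.
};

// std::make_heap puts the comparator's "largest" element at front(), so the
// inverted comparison places the lowest numeric priority at the heap front.
bool CompareTaskPriority(const Task* a, const Task* b) {
  if (a->priority != b->priority)
    return a->priority > b->priority;
  return a->num_dependents < b->num_dependents;
}

int main() {
  Task high = {0u, 1u};  // Priority 0: most urgent.
  Task low = {2u, 5u};
  std::vector<Task*> ready = {&low, &high};

  std::make_heap(ready.begin(), ready.end(), CompareTaskPriority);
  assert(ready.front() == &high);  // front() holds the next task to run...

  std::pop_heap(ready.begin(), ready.end(), CompareTaskPriority);
  assert(ready.back() == &high);   // ...and pop_heap moves it to back() for removal.
  ready.pop_back();
  return 0;
}
```
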
146 126
147 WorkerPool::Inner::Inner( 127 TaskGraphRunner::TaskGraphRunner(
148 size_t num_threads, const std::string& thread_name_prefix) 128 size_t num_threads, const std::string& thread_name_prefix)
149 : lock_(), 129 : lock_(),
150 has_ready_to_run_tasks_cv_(&lock_), 130 has_ready_to_run_tasks_cv_(&lock_),
131 has_namespaces_with_finished_running_tasks_cv_(&lock_),
151 next_thread_index_(0), 132 next_thread_index_(0),
152 shutdown_(false) { 133 shutdown_(false) {
153 base::AutoLock lock(lock_); 134 base::AutoLock lock(lock_);
154 135
155 while (workers_.size() < num_threads) { 136 while (workers_.size() < num_threads) {
156 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( 137 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr(
157 new base::DelegateSimpleThread( 138 new base::DelegateSimpleThread(
158 this, 139 this,
159 thread_name_prefix + 140 thread_name_prefix +
160 base::StringPrintf( 141 base::StringPrintf(
161 "Worker%u", 142 "Worker%u",
162 static_cast<unsigned>(workers_.size() + 1)).c_str())); 143 static_cast<unsigned>(workers_.size() + 1)).c_str()));
163 worker->Start(); 144 worker->Start();
164 #if defined(OS_ANDROID) || defined(OS_LINUX) 145 #if defined(OS_ANDROID) || defined(OS_LINUX)
165 worker->SetThreadPriority(base::kThreadPriority_Background); 146 worker->SetThreadPriority(base::kThreadPriority_Background);
166 #endif 147 #endif
167 workers_.push_back(worker.Pass()); 148 workers_.push_back(worker.Pass());
168 } 149 }
169 } 150 }
170 151
171 WorkerPool::Inner::~Inner() { 152 TaskGraphRunner::~TaskGraphRunner() {
172 base::AutoLock lock(lock_);
173
174 DCHECK(shutdown_);
175
176 DCHECK_EQ(0u, pending_tasks_.size());
177 DCHECK_EQ(0u, ready_to_run_tasks_.size());
178 DCHECK_EQ(0u, running_tasks_.size());
179 DCHECK_EQ(0u, completed_tasks_.size());
180 }
181
182 void WorkerPool::Inner::Shutdown() {
183 { 153 {
184 base::AutoLock lock(lock_); 154 base::AutoLock lock(lock_);
185 155
156 DCHECK_EQ(0u, ready_to_run_namespaces_.size());
157 DCHECK_EQ(0u, namespaces_.size());
158
186 DCHECK(!shutdown_); 159 DCHECK(!shutdown_);
187 shutdown_ = true; 160 shutdown_ = true;
188 161
189 // Wake up a worker so it knows it should exit. This will cause all workers 162 // Wake up a worker so it knows it should exit. This will cause all workers
190 // to exit as each will wake up another worker before exiting. 163 // to exit as each will wake up another worker before exiting.
191 has_ready_to_run_tasks_cv_.Signal(); 164 has_ready_to_run_tasks_cv_.Signal();
192 } 165 }
193 166
194 while (workers_.size()) { 167 while (workers_.size()) {
195 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front(); 168 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front();
196 // http://crbug.com/240453 - Join() is considered IO and will block this 169 // http://crbug.com/240453 - Join() is considered IO and will block this
197 // thread. See also http://crbug.com/239423 for further ideas. 170 // thread. See also http://crbug.com/239423 for further ideas.
198 base::ThreadRestrictions::ScopedAllowIO allow_io; 171 base::ThreadRestrictions::ScopedAllowIO allow_io;
199 worker->Join(); 172 worker->Join();
200 } 173 }
201 } 174 }
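
The shutdown path above relies on a hand-off: the destructor signals the condition variable once, and each worker signals it again on its way out, so a single Signal() eventually unblocks every thread before Join(). A hedged sketch of that pattern using std::thread and std::condition_variable in place of base::DelegateSimpleThread and base::ConditionVariable (the ScopedAllowIO detail is omitted):

```cpp
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class Pool {
 public:
  explicit Pool(size_t num_threads) {
    for (size_t i = 0; i < num_threads; ++i)
      workers_.emplace_back([this] { Run(); });
  }

  ~Pool() {
    {
      std::lock_guard<std::mutex> lock(lock_);
      shutdown_ = true;
      has_work_cv_.notify_one();  // Wake one worker; it wakes the next, etc.
    }
    for (std::thread& worker : workers_)
      worker.join();  // Counterpart of DelegateSimpleThread::Join().
  }

 private:
  void Run() {
    std::unique_lock<std::mutex> lock(lock_);
    while (!shutdown_)
      has_work_cv_.wait(lock);  // Real code also claims and runs tasks here.
    has_work_cv_.notify_one();  // Pass the wake-up along before exiting.
  }

  std::mutex lock_;
  std::condition_variable has_work_cv_;
  bool shutdown_ = false;
  std::vector<std::thread> workers_;
};
```
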
202 175
203 void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { 176 void TaskGraphRunner::Register(const WorkerPool* worker_pool) {
204 // It is OK to call SetTaskGraph() after shutdown if |graph| is empty. 177 base::AutoLock lock(lock_);
205 DCHECK(graph->empty() || !shutdown_);
206 178
207 GraphNodeMap new_pending_tasks; 179 DCHECK(namespaces_.find(worker_pool) == namespaces_.end());
208 GraphNodeMap new_running_tasks; 180 linked_ptr<TaskNamespace> task_set = make_linked_ptr(new TaskNamespace());
181 namespaces_[worker_pool] = task_set;
182 }
183
184 void TaskGraphRunner::Unregister(const WorkerPool* worker_pool) {
185 base::AutoLock lock(lock_);
186
187 DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
188 DCHECK_EQ(0u, namespaces_[worker_pool]->pending_tasks.size());
189 DCHECK_EQ(0u, namespaces_[worker_pool]->ready_to_run_tasks.size());
190 DCHECK_EQ(0u, namespaces_[worker_pool]->running_tasks.size());
191 DCHECK_EQ(0u, namespaces_[worker_pool]->completed_tasks.size());
192
193 namespaces_.erase(worker_pool);
194 }
195
196 void TaskGraphRunner::WaitForTasksToFinishRunning(
197 const WorkerPool* worker_pool) {
198 base::AutoLock lock(lock_);
199
200 DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
201 TaskNamespace* task_namespace = namespaces_[worker_pool].get();
202
203 while (!has_finished_running_tasks(task_namespace))
204 has_namespaces_with_finished_running_tasks_cv_.Wait();
205
206 // There may be other namespaces that have finished running
207 // tasks, so wake up another origin thread.
208 has_namespaces_with_finished_running_tasks_cv_.Signal();
209 }
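
WaitForTasksToFinishRunning() waits on one condition variable shared by every origin thread, so a woken waiter re-checks its own namespace and forwards the signal in case the notification was meant for a different caller. A simplified standard C++ sketch of that re-check-and-forward idiom, with counters standing in for the pending/running TaskGraph sets:

```cpp
#include <condition_variable>
#include <mutex>

struct TaskNamespace {
  int pending_tasks = 0;
  int running_tasks = 0;
};

std::mutex lock;
std::condition_variable finished_cv;

void WaitForNamespaceToDrain(TaskNamespace* ns) {
  std::unique_lock<std::mutex> guard(lock);
  while (ns->pending_tasks > 0 || ns->running_tasks > 0)
    finished_cv.wait(guard);
  // Another namespace may also have finished running all of its tasks, so pass
  // the notification along to the next waiting origin thread.
  finished_cv.notify_one();
}
```
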
210
211 void TaskGraphRunner::SetTaskGraph(const WorkerPool* worker_pool,
212 TaskGraph* graph) {
213 TaskGraph new_pending_tasks;
214 TaskGraph new_running_tasks;
209 215
210 new_pending_tasks.swap(*graph); 216 new_pending_tasks.swap(*graph);
211 217
212 { 218 {
213 base::AutoLock lock(lock_); 219 base::AutoLock lock(lock_);
214 220
221 DCHECK(!shutdown_);
222 DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
223 TaskNamespace* task_namespace = namespaces_[worker_pool].get();
224
215 // First remove all completed tasks from |new_pending_tasks| and 225 // First remove all completed tasks from |new_pending_tasks| and
216 // adjust number of dependencies. 226 // adjust number of dependencies.
217 for (TaskVector::iterator it = completed_tasks_.begin(); 227 for (TaskVector::iterator it = task_namespace->completed_tasks.begin();
218 it != completed_tasks_.end(); ++it) { 228 it != task_namespace->completed_tasks.end(); ++it) {
219 internal::WorkerPoolTask* task = it->get(); 229 internal::WorkerPoolTask* task = it->get();
220 230
221 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( 231 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase(
222 task); 232 task);
223 if (node) { 233 if (node) {
224 for (internal::GraphNode::Vector::const_iterator it = 234 for (internal::GraphNode::Vector::const_iterator it =
225 node->dependents().begin(); 235 node->dependents().begin();
226 it != node->dependents().end(); ++it) { 236 it != node->dependents().end(); ++it) {
227 internal::GraphNode* dependent_node = *it; 237 internal::GraphNode* dependent_node = *it;
228 dependent_node->remove_dependency(); 238 dependent_node->remove_dependency();
229 } 239 }
230 } 240 }
231 } 241 }
232 242
233 // Build new running task set. 243 // Build new running task set.
234 for (GraphNodeMap::iterator it = running_tasks_.begin(); 244 for (TaskGraph::iterator it = task_namespace->running_tasks.begin();
235 it != running_tasks_.end(); ++it) { 245 it != task_namespace->running_tasks.end(); ++it) {
236 internal::WorkerPoolTask* task = it->first; 246 internal::WorkerPoolTask* task = it->first;
237 // Transfer scheduled task value from |new_pending_tasks| to 247 // Transfer scheduled task value from |new_pending_tasks| to
238 // |new_running_tasks| if currently running. Value must be set to 248 // |new_running_tasks| if currently running. Value must be set to
239 // NULL if |new_pending_tasks| doesn't contain task. This does 249 // NULL if |new_pending_tasks| doesn't contain task. This does
240 // the right in both cases. 250 // the right in both cases.
241 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); 251 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task));
242 } 252 }
243 253
244 // Build new "ready to run" tasks queue. 254 // Build new "ready to run" tasks queue.
245 ready_to_run_tasks_.clear(); 255 task_namespace->ready_to_run_tasks.clear();
246 for (GraphNodeMap::iterator it = new_pending_tasks.begin(); 256 for (TaskGraph::iterator it = new_pending_tasks.begin();
247 it != new_pending_tasks.end(); ++it) { 257 it != new_pending_tasks.end(); ++it) {
248 internal::WorkerPoolTask* task = it->first; 258 internal::WorkerPoolTask* task = it->first;
249 DCHECK(task); 259 DCHECK(task);
250 internal::GraphNode* node = it->second; 260 internal::GraphNode* node = it->second;
251 261
252 // Completed tasks should not exist in |new_pending_tasks|. 262 // Completed tasks should not exist in |new_pending_tasks|.
253 DCHECK(!task->HasFinishedRunning()); 263 DCHECK(!task->HasFinishedRunning());
254 264
255 // Call DidSchedule() to indicate that this task has been scheduled. 265 // Call DidSchedule() to indicate that this task has been scheduled.
256 // Note: This is only for debugging purposes. 266 // Note: This is only for debugging purposes.
257 task->DidSchedule(); 267 task->DidSchedule();
258 268
259 if (!node->num_dependencies()) 269 if (!node->num_dependencies())
260 ready_to_run_tasks_.push_back(node); 270 task_namespace->ready_to_run_tasks.push_back(node);
261 271
262 // Erase the task from old pending tasks. 272 // Erase the task from old pending tasks.
263 pending_tasks_.erase(task); 273 task_namespace->pending_tasks.erase(task);
264 } 274 }
265 275
266 // Rearrange the elements in |ready_to_run_tasks_| in such a way that 276 // Rearrange the elements in |ready_to_run_tasks| in such a way that
267 // they form a heap. 277 // they form a heap.
268 std::make_heap(ready_to_run_tasks_.begin(), 278 std::make_heap(task_namespace->ready_to_run_tasks.begin(),
269 ready_to_run_tasks_.end(), 279 task_namespace->ready_to_run_tasks.end(),
270 CompareTaskPriority); 280 CompareTaskPriority);
271 281
272 completed_tasks_.reserve(completed_tasks_.size() + pending_tasks_.size()); 282 task_namespace->completed_tasks.reserve(
283 task_namespace->completed_tasks.size() +
284 task_namespace->pending_tasks.size());
273 285
274 // The items left in |pending_tasks_| need to be canceled. 286 // The items left in |pending_tasks| need to be canceled.
275 for (GraphNodeMap::const_iterator it = pending_tasks_.begin(); 287 for (TaskGraph::const_iterator it = task_namespace->pending_tasks.begin();
276 it != pending_tasks_.end(); 288 it != task_namespace->pending_tasks.end(); ++it) {
277 ++it) { 289 task_namespace->completed_tasks.push_back(it->first);
278 completed_tasks_.push_back(it->first);
279 } 290 }
280 291
281 // Swap task sets. 292 // Swap task sets.
282 // Note: old tasks are intentionally destroyed after releasing |lock_|. 293 // Note: old tasks are intentionally destroyed after releasing |lock_|.
283 pending_tasks_.swap(new_pending_tasks); 294 task_namespace->pending_tasks.swap(new_pending_tasks);
284 running_tasks_.swap(new_running_tasks); 295 task_namespace->running_tasks.swap(new_running_tasks);
285 296
286 // If |ready_to_run_tasks_| is empty, it means we either have 297 // If |ready_to_run_tasks| is empty, it means we either have
287 // running tasks, or we have no pending tasks. 298 // running tasks, or we have no pending tasks.
288 DCHECK(!ready_to_run_tasks_.empty() || 299 DCHECK(!task_namespace->ready_to_run_tasks.empty() ||
289 (pending_tasks_.empty() || !running_tasks_.empty())); 300 (task_namespace->pending_tasks.empty() ||
301 !task_namespace->running_tasks.empty()));
302
303 // Build new "ready to run" task namespaces queue.
304 ready_to_run_namespaces_.clear();
305 for (TaskNamespaceMap::iterator it = namespaces_.begin();
306 it != namespaces_.end(); ++it) {
307 if (!it->second->ready_to_run_tasks.empty())
308 ready_to_run_namespaces_.push_back(it->second.get());
309 }
310
311 // Rearrange the task namespaces in |ready_to_run_namespaces_|
312 // in such a way that they form a heap.
313 std::make_heap(ready_to_run_namespaces_.begin(),
314 ready_to_run_namespaces_.end(),
315 CompareTaskNamespacePriority);
290 316
291 // If there is more work available, wake up worker thread. 317 // If there is more work available, wake up worker thread.
292 if (!ready_to_run_tasks_.empty()) 318 if (!ready_to_run_namespaces_.empty())
293 has_ready_to_run_tasks_cv_.Signal(); 319 has_ready_to_run_tasks_cv_.Signal();
294 } 320 }
295 } 321 }
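
The guarantee documented at the top of SetTaskGraph(), that every scheduled task eventually reaches |completed_tasks| whether or not it ran, falls out of the bookkeeping above: still-running tasks are carried over, tasks the new graph still wants stay pending, and everything else is appended to the completed list without running. A stripped-down sketch under those assumptions (plain std::map/std::vector instead of the cc/ containers; completed-task pruning, dependency counts, and the ready-heap rebuild are omitted):

```cpp
#include <map>
#include <vector>

struct Task {};
struct Node { unsigned priority = 0; };

struct TaskNamespace {
  std::map<Task*, Node> pending_tasks;
  std::map<Task*, Node> running_tasks;
  std::vector<Task*> completed_tasks;
};

// |graph| maps every wanted task to its scheduling node; it is consumed here.
void SetTaskGraphSketch(TaskNamespace* ns, std::map<Task*, Node>* graph) {
  std::map<Task*, Node> new_pending;
  new_pending.swap(*graph);

  // Currently running tasks stay in the running set; if the new graph still
  // wants them they are removed from |new_pending| so they are not scheduled
  // a second time.
  std::map<Task*, Node> new_running;
  for (std::map<Task*, Node>::iterator it = ns->running_tasks.begin();
       it != ns->running_tasks.end(); ++it) {
    new_running[it->first] = it->second;
    new_pending.erase(it->first);
  }

  // Previously pending tasks that the new graph no longer wants are canceled:
  // they go straight to |completed_tasks| without ever being run.
  for (std::map<Task*, Node>::iterator it = ns->pending_tasks.begin();
       it != ns->pending_tasks.end(); ++it) {
    if (new_pending.count(it->first) == 0)
      ns->completed_tasks.push_back(it->first);
  }

  ns->pending_tasks.swap(new_pending);
  ns->running_tasks.swap(new_running);
}
```
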
296 322
297 void WorkerPool::Inner::CollectCompletedTasks(TaskVector* completed_tasks) { 323 void TaskGraphRunner::CollectCompletedTasks(
324 const WorkerPool* worker_pool, TaskVector* completed_tasks) {
298 base::AutoLock lock(lock_); 325 base::AutoLock lock(lock_);
299 326
300 DCHECK_EQ(0u, completed_tasks->size()); 327 DCHECK_EQ(0u, completed_tasks->size());
301 completed_tasks->swap(completed_tasks_); 328 DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
329 completed_tasks->swap(namespaces_[worker_pool]->completed_tasks);
302 } 330 }
303 331
304 void WorkerPool::Inner::Run() { 332 void TaskGraphRunner::Run() {
305 base::AutoLock lock(lock_); 333 base::AutoLock lock(lock_);
306 334
307 // Get a unique thread index. 335 // Get a unique thread index.
308 int thread_index = next_thread_index_++; 336 int thread_index = next_thread_index_++;
309 337
310 while (true) { 338 while (true) {
311 if (ready_to_run_tasks_.empty()) { 339 if (ready_to_run_namespaces_.empty()) {
312 // Exit when shutdown is set and no more tasks are pending. 340 // Exit when shutdown is set and no more tasks are pending.
313 if (shutdown_ && pending_tasks_.empty()) 341 if (shutdown_)
314 break; 342 break;
315 343
316 // Wait for more tasks. 344 // Wait for more tasks.
317 has_ready_to_run_tasks_cv_.Wait(); 345 has_ready_to_run_tasks_cv_.Wait();
318 continue; 346 continue;
319 } 347 }
320 348
321 // Take top priority task from |ready_to_run_tasks_|. 349 // Take top priority TaskNamespace from |ready_to_run_namespaces_|.
322 std::pop_heap(ready_to_run_tasks_.begin(), 350 std::pop_heap(ready_to_run_namespaces_.begin(),
323 ready_to_run_tasks_.end(), 351 ready_to_run_namespaces_.end(),
352 CompareTaskNamespacePriority);
353 TaskNamespace* task_namespace = ready_to_run_namespaces_.back();
354 ready_to_run_namespaces_.pop_back();
355 DCHECK(!task_namespace->ready_to_run_tasks.empty());
356
357 // Take top priority task from |ready_to_run_tasks|.
358 std::pop_heap(task_namespace->ready_to_run_tasks.begin(),
359 task_namespace->ready_to_run_tasks.end(),
324 CompareTaskPriority); 360 CompareTaskPriority);
325 scoped_refptr<internal::WorkerPoolTask> task( 361 scoped_refptr<internal::WorkerPoolTask> task(
326 ready_to_run_tasks_.back()->task()); 362 task_namespace->ready_to_run_tasks.back()->task());
327 ready_to_run_tasks_.pop_back(); 363 task_namespace->ready_to_run_tasks.pop_back();
328 364
329 // Move task from |pending_tasks_| to |running_tasks_|. 365 // Add task namespace back to |ready_to_run_namespaces_| if not
330 DCHECK(pending_tasks_.contains(task.get())); 366 // empty after taking top priority task.
331 DCHECK(!running_tasks_.contains(task.get())); 367 if (!task_namespace->ready_to_run_tasks.empty()) {
332 running_tasks_.set(task.get(), pending_tasks_.take_and_erase(task.get())); 368 ready_to_run_namespaces_.push_back(task_namespace);
369 std::push_heap(ready_to_run_namespaces_.begin(),
370 ready_to_run_namespaces_.end(),
371 CompareTaskNamespacePriority);
372 }
373
374 // Move task from |pending_tasks| to |running_tasks|.
375 DCHECK(task_namespace->pending_tasks.contains(task.get()));
376 DCHECK(!task_namespace->running_tasks.contains(task.get()));
377 task_namespace->running_tasks.set(
378 task.get(),
379 task_namespace->pending_tasks.take_and_erase(task.get()));
333 380
334 // There may be more work available, so wake up another worker thread. 381 // There may be more work available, so wake up another worker thread.
335 has_ready_to_run_tasks_cv_.Signal(); 382 has_ready_to_run_tasks_cv_.Signal();
336 383
337 // Call WillRun() before releasing |lock_| and running task. 384 // Call WillRun() before releasing |lock_| and running task.
338 task->WillRun(); 385 task->WillRun();
339 386
340 { 387 {
341 base::AutoUnlock unlock(lock_); 388 base::AutoUnlock unlock(lock_);
342 389
343 task->RunOnWorkerThread(thread_index); 390 task->RunOnWorkerThread(thread_index);
344 } 391 }
345 392
346 // This will mark task as finished running. 393 // This will mark task as finished running.
347 task->DidRun(); 394 task->DidRun();
348 395
349 // Now iterate over all dependents to remove dependency and check 396 // Now iterate over all dependents to remove dependency and check
350 // if they are ready to run. 397 // if they are ready to run.
351 scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase( 398 scoped_ptr<internal::GraphNode> node =
352 task.get()); 399 task_namespace->running_tasks.take_and_erase(task.get());
353 if (node) { 400 if (node) {
401 bool ready_to_run_namespaces_has_heap_properties = true;
reveman 2014/01/07 17:24:06 nit: I'd prefer to add a blank line after this
sohanjg 2014/01/08 05:55:43 Done.
354 for (internal::GraphNode::Vector::const_iterator it = 402 for (internal::GraphNode::Vector::const_iterator it =
355 node->dependents().begin(); 403 node->dependents().begin();
356 it != node->dependents().end(); ++it) { 404 it != node->dependents().end(); ++it) {
357 internal::GraphNode* dependent_node = *it; 405 internal::GraphNode* dependent_node = *it;
358 406
359 dependent_node->remove_dependency(); 407 dependent_node->remove_dependency();
360 // Task is ready if it has no dependencies. Add it to 408 // Task is ready if it has no dependencies. Add it to
361 // |ready_to_run_tasks_|. 409 // |ready_to_run_tasks_|.
362 if (!dependent_node->num_dependencies()) { 410 if (!dependent_node->num_dependencies()) {
363 ready_to_run_tasks_.push_back(dependent_node); 411 bool was_empty = task_namespace->ready_to_run_tasks.empty();
364 std::push_heap(ready_to_run_tasks_.begin(), 412 task_namespace->ready_to_run_tasks.push_back(dependent_node);
365 ready_to_run_tasks_.end(), 413 std::push_heap(task_namespace->ready_to_run_tasks.begin(),
414 task_namespace->ready_to_run_tasks.end(),
366 CompareTaskPriority); 415 CompareTaskPriority);
416 // Task namespace is ready if it has at least one ready
417 // to run task. Add it to |ready_to_run_namespaces_| if
418 // it just became ready.
419 if (was_empty) {
420 DCHECK(std::find(ready_to_run_namespaces_.begin(),
421 ready_to_run_namespaces_.end(),
422 task_namespace) ==
423 ready_to_run_namespaces_.end());
424 ready_to_run_namespaces_.push_back(task_namespace);
425 ready_to_run_namespaces_has_heap_properties = false;
reveman 2014/01/07 17:24:06 this needs to be set to false even when "was_empty" is false.
426 }
367 } 427 }
368 } 428 }
429 if (!ready_to_run_namespaces_has_heap_properties)
reveman 2014/01/07 17:24:06 nit: blank line before this please. add this comment ...
430 std::make_heap(ready_to_run_namespaces_.begin(),
431 ready_to_run_namespaces_.end(),
432 CompareTaskNamespacePriority);
reveman 2014/01/07 17:24:06 nit: please use "{" "}" as this is a multiple line statement.
369 } 433 }
370 434
371 // Finally add task to |completed_tasks_|. 435 // Finally add task to |completed_tasks_|.
372 completed_tasks_.push_back(task); 436 task_namespace->completed_tasks.push_back(task);
437
438 // If namespace has finished running all tasks, wake up origin thread.
439 if (has_finished_running_tasks(task_namespace))
440 has_namespaces_with_finished_running_tasks_cv_.Signal();
373 } 441 }
374 442
375 // We noticed we should exit. Wake up the next worker so it knows it should 443 // We noticed we should exit. Wake up the next worker so it knows it should
376 // exit as well (because the Shutdown() code only signals once). 444 // exit as well (because the Shutdown() code only signals once).
377 has_ready_to_run_tasks_cv_.Signal(); 445 has_ready_to_run_tasks_cv_.Signal();
378 } 446 }
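
Run() only touches the shared sets while |lock_| is held, wakes another worker as soon as it has claimed a task (since more ready work may remain), and releases the lock around the task body itself. A reduced sketch of that locking discipline using the standard library, with a flat deque of closures standing in for the namespace and graph bookkeeping:

```cpp
#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>

std::mutex lock;
std::condition_variable has_ready_to_run_tasks_cv;
std::deque<std::function<void()>> ready_to_run_tasks;  // Guarded by |lock|.
bool shutdown = false;                                  // Guarded by |lock|.

void WorkerLoop() {
  std::unique_lock<std::mutex> guard(lock);
  while (true) {
    if (ready_to_run_tasks.empty()) {
      if (shutdown)
        break;
      has_ready_to_run_tasks_cv.wait(guard);  // Sleep until work or shutdown.
      continue;
    }

    std::function<void()> task = ready_to_run_tasks.front();
    ready_to_run_tasks.pop_front();

    // There may be more work available, so let another worker start.
    has_ready_to_run_tasks_cv.notify_one();

    guard.unlock();  // Counterpart of base::AutoUnlock: never run a task
    task();          // while holding the pool lock.
    guard.lock();
  }
  // Propagate the shutdown wake-up to the next worker before exiting.
  has_ready_to_run_tasks_cv.notify_one();
}
```
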
379 447
380 WorkerPool::WorkerPool(size_t num_threads, 448 class CC_EXPORT CompositorRasterTaskGraphRunner
381 const std::string& thread_name_prefix) 449 : public TaskGraphRunner {
382 : in_dispatch_completion_callbacks_(false), 450 public:
383 inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) { 451 CompositorRasterTaskGraphRunner() : TaskGraphRunner(
452 WorkerPool::GetNumRasterThreads(), "CompositorRaster") {
453 }
454 };
455
456 base::LazyInstance<CompositorRasterTaskGraphRunner>
457 g_task_graph_runner = LAZY_INSTANCE_INITIALIZER;
458
459 } // namespace
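
The LazyInstance above is what actually shares the raster threads: every WorkerPool in the process registers with the same lazily created TaskGraphRunner instead of owning its own Inner and worker threads. A rough portable C++ equivalent of that effect, with a function-local static standing in for base::LazyInstance (the thread count here is a placeholder, not WorkerPool::GetNumRasterThreads(), and destruction-order details differ):

```cpp
#include <cstddef>
#include <string>

// Illustrative stand-in for the real TaskGraphRunner declared above.
class SharedTaskGraphRunner {
 public:
  SharedTaskGraphRunner(size_t num_threads, const std::string& prefix) {
    // The real runner spawns |num_threads| workers named |prefix| + "Worker<n>".
  }
};

SharedTaskGraphRunner* GetSharedRasterTaskGraphRunner() {
  // Constructed once, on first use, and shared by every caller in the process.
  static SharedTaskGraphRunner runner(4 /* placeholder thread count */,
                                      "CompositorRaster");
  return &runner;
}
```
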
460
461 namespace internal {
462
463 WorkerPoolTask::WorkerPoolTask()
464 : did_schedule_(false),
465 did_run_(false),
466 did_complete_(false) {
467 }
468
469 WorkerPoolTask::~WorkerPoolTask() {
470 DCHECK_EQ(did_schedule_, did_complete_);
471 DCHECK(!did_run_ || did_schedule_);
472 DCHECK(!did_run_ || did_complete_);
473 }
474
475 void WorkerPoolTask::DidSchedule() {
476 DCHECK(!did_complete_);
477 did_schedule_ = true;
478 }
479
480 void WorkerPoolTask::WillRun() {
481 DCHECK(did_schedule_);
482 DCHECK(!did_complete_);
483 DCHECK(!did_run_);
484 }
485
486 void WorkerPoolTask::DidRun() {
487 did_run_ = true;
488 }
489
490 void WorkerPoolTask::WillComplete() {
491 DCHECK(!did_complete_);
492 }
493
494 void WorkerPoolTask::DidComplete() {
495 DCHECK(did_schedule_);
496 DCHECK(!did_complete_);
497 did_complete_ = true;
498 }
499
500 bool WorkerPoolTask::HasFinishedRunning() const {
501 return did_run_;
502 }
503
504 bool WorkerPoolTask::HasCompleted() const {
505 return did_complete_;
506 }
507
508 GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority)
509 : task_(task),
510 priority_(priority),
511 num_dependencies_(0) {
512 }
513
514 GraphNode::~GraphNode() {
515 }
516
517 } // namespace internal
518
519 WorkerPool::WorkerPool() : in_dispatch_completion_callbacks_(false) {
520 g_task_graph_runner.Pointer()->Register(this);
384 } 521 }
385 522
386 WorkerPool::~WorkerPool() { 523 WorkerPool::~WorkerPool() {
524 g_task_graph_runner.Pointer()->Unregister(this);
387 } 525 }
388 526
389 void WorkerPool::Shutdown() { 527 void WorkerPool::Shutdown() {
390 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); 528 TRACE_EVENT0("cc", "WorkerPool::Shutdown");
391 529
392 DCHECK(!in_dispatch_completion_callbacks_); 530 DCHECK(!in_dispatch_completion_callbacks_);
393 531
394 inner_->Shutdown(); 532 g_task_graph_runner.Pointer()->WaitForTasksToFinishRunning(this);
395 } 533 }
396 534
397 void WorkerPool::CheckForCompletedTasks() { 535 void WorkerPool::CheckForCompletedTasks() {
398 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks"); 536 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks");
399 537
400 DCHECK(!in_dispatch_completion_callbacks_); 538 DCHECK(!in_dispatch_completion_callbacks_);
401 539
402 TaskVector completed_tasks; 540 TaskVector completed_tasks;
403 inner_->CollectCompletedTasks(&completed_tasks); 541 g_task_graph_runner.Pointer()->CollectCompletedTasks(this, &completed_tasks);
404 ProcessCompletedTasks(completed_tasks); 542 ProcessCompletedTasks(completed_tasks);
405 } 543 }
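
CheckForCompletedTasks() moves the finished tasks out with a single swap while the runner's lock is held and then dispatches completion callbacks on the origin thread with no lock held. A small sketch of that collect-then-dispatch idiom, with std::mutex and a plain task type standing in for the cc/ types (the real dispatch lives in ProcessCompletedTasks() below):

```cpp
#include <memory>
#include <mutex>
#include <vector>

struct Task {
  void DidComplete() {}
};

std::mutex lock;
std::vector<std::shared_ptr<Task>> completed_tasks_shared;  // Guarded by |lock|.

void CheckForCompletedTasksSketch() {
  std::vector<std::shared_ptr<Task>> completed;
  {
    std::lock_guard<std::mutex> guard(lock);
    completed.swap(completed_tasks_shared);  // O(1); no per-task copying.
  }
  for (const std::shared_ptr<Task>& task : completed)
    task->DidComplete();  // Dispatch callbacks outside the lock.
}
```
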
406 544
407 void WorkerPool::ProcessCompletedTasks( 545 void WorkerPool::ProcessCompletedTasks(
408 const TaskVector& completed_tasks) { 546 const TaskVector& completed_tasks) {
409 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", 547 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks",
410 "completed_task_count", completed_tasks.size()); 548 "completed_task_count", completed_tasks.size());
411 549
412 // Worker pool instance is not reentrant while processing completed tasks. 550 // Worker pool instance is not reentrant while processing completed tasks.
413 in_dispatch_completion_callbacks_ = true; 551 in_dispatch_completion_callbacks_ = true;
(...skipping 10 matching lines...)
424 562
425 in_dispatch_completion_callbacks_ = false; 563 in_dispatch_completion_callbacks_ = false;
426 } 564 }
427 565
428 void WorkerPool::SetTaskGraph(TaskGraph* graph) { 566 void WorkerPool::SetTaskGraph(TaskGraph* graph) {
429 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", 567 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph",
430 "num_tasks", graph->size()); 568 "num_tasks", graph->size());
431 569
432 DCHECK(!in_dispatch_completion_callbacks_); 570 DCHECK(!in_dispatch_completion_callbacks_);
433 571
434 inner_->SetTaskGraph(graph); 572 g_task_graph_runner.Pointer()->SetTaskGraph(this, graph);
435 } 573 }
436 574
437 } // namespace cc 575 } // namespace cc