// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/resources/worker_pool.h"

#include <algorithm>

#include "base/bind.h"
#include "base/containers/hash_tables.h"
#include "base/debug/trace_event.h"
#include "base/lazy_instance.h"
#include "base/memory/linked_ptr.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/condition_variable.h"
#include "base/threading/simple_thread.h"
#include "base/threading/thread_restrictions.h"
#include "cc/base/scoped_ptr_deque.h"

namespace cc {

namespace {

// A TaskGraphRunner can process task graphs from multiple WorkerPool
// instances. All members are guarded by |lock_|.
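//
// Illustrative call sequence for a single client, mirroring how the
// WorkerPool class below drives this runner (|runner| is a hypothetical
// pointer to a TaskGraphRunner; the methods are the real ones declared
// below):
//
//   runner->Register(worker_pool);
//   runner->SetTaskGraph(worker_pool, &graph);
//   runner->WaitForTasksToFinishRunning(worker_pool);
//   runner->CollectCompletedTasks(worker_pool, &completed_tasks);
//   runner->Unregister(worker_pool);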
class TaskGraphRunner : public base::DelegateSimpleThread::Delegate {
 public:
  typedef WorkerPool::TaskGraph TaskGraph;
  typedef WorkerPool::TaskVector TaskVector;

  TaskGraphRunner(size_t num_threads, const std::string& thread_name_prefix);
  virtual ~TaskGraphRunner();

  void Register(const WorkerPool* worker_pool);
  void Unregister(const WorkerPool* worker_pool);
  // Schedule running of tasks in |graph|. Tasks previously scheduled but
  // no longer needed will be canceled unless already running. Canceled
  // tasks are moved to |completed_tasks| without being run. The result
  // is that once scheduled, a task is guaranteed to end up in the
  // |completed_tasks| queue even if it later gets canceled by another
  // call to SetTaskGraph().
  void SetTaskGraph(const WorkerPool* worker_pool, TaskGraph* graph);

  // Wait for all scheduled tasks to finish running.
  void WaitForTasksToFinishRunning(const WorkerPool* worker_pool);

  // Collect all completed tasks in |completed_tasks|.
  void CollectCompletedTasks(const WorkerPool* worker_pool,
                             TaskVector* completed_tasks);

 private:
  static bool CompareTaskPriority(const internal::GraphNode* a,
                                  const internal::GraphNode* b) {
    // In this system, numerically lower priority is run first.
    if (a->priority() != b->priority())
      return a->priority() > b->priority();

    // Run task with most dependents first when priority is the same.
    return a->dependents().size() < b->dependents().size();
  }

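  // Per-WorkerPool task state. Each registered WorkerPool is mapped to one
  // TaskNamespace (see |namespaces_| below), so task graphs from different
  // pools are tracked independently while sharing the same worker threads.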
  struct TaskNamespace {
    // This set contains all pending tasks.
    TaskGraph pending_tasks;
    // This set contains all currently running tasks.
    TaskGraph running_tasks;
    // Completed tasks not yet collected by origin thread.
    TaskVector completed_tasks;
    // Ordered set of tasks that are ready to run.
    internal::GraphNode::Vector ready_to_run_tasks;
  };

  static bool CompareTaskNamespacePriority(const TaskNamespace* a,
                                           const TaskNamespace* b) {
    DCHECK(!a->ready_to_run_tasks.empty());
    DCHECK(!b->ready_to_run_tasks.empty());

    // Compare based on the task priority of the |ready_to_run_tasks| heaps.
    // .front() will hold the max element of the heap, except after pop_heap,
    // when the max element is moved to .back().
    return CompareTaskPriority(a->ready_to_run_tasks.front(),
                               b->ready_to_run_tasks.front());
  }

  typedef std::map<const WorkerPool*, linked_ptr<TaskNamespace> >
      TaskNamespaceMap;

  // Overridden from base::DelegateSimpleThread:
  virtual void Run() OVERRIDE;

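  // Returns true when all tasks for |task_namespace| have finished running.
  // Must be called while holding |lock_|.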
  inline bool has_finished_running_tasks(TaskNamespace* task_namespace) {
    return (task_namespace->pending_tasks.empty() &&
            task_namespace->running_tasks.empty());
  }

  // This lock protects all members of this class. Do not read or modify
  // anything without holding this lock. Do not block while holding this
  // lock.
  mutable base::Lock lock_;

  // Condition variable that is waited on by worker threads until new
  // tasks are ready to run or shutdown starts.
  base::ConditionVariable has_ready_to_run_tasks_cv_;

  // Condition variable that is waited on by origin threads until a
  // namespace has finished running all associated tasks.
  base::ConditionVariable has_namespaces_with_finished_running_tasks_cv_;

  // Provides each running thread loop with a unique index. First thread
  // loop index is 0.
  unsigned next_thread_index_;

  // Set during shutdown. Tells workers to exit when no more tasks
  // are pending.
  bool shutdown_;

  // This set contains all registered namespaces.
  TaskNamespaceMap namespaces_;

  // Ordered set of task namespaces that have ready to run tasks.
  std::vector<TaskNamespace*> ready_to_run_namespaces_;

  ScopedPtrDeque<base::DelegateSimpleThread> workers_;

  DISALLOW_COPY_AND_ASSIGN(TaskGraphRunner);
};

TaskGraphRunner::TaskGraphRunner(
    size_t num_threads, const std::string& thread_name_prefix)
    : lock_(),
      has_ready_to_run_tasks_cv_(&lock_),
      has_namespaces_with_finished_running_tasks_cv_(&lock_),
      next_thread_index_(0),
      shutdown_(false) {
  base::AutoLock lock(lock_);

  while (workers_.size() < num_threads) {
    scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr(
        new base::DelegateSimpleThread(
            this,
            thread_name_prefix +
            base::StringPrintf(
                "Worker%u",
                static_cast<unsigned>(workers_.size() + 1)).c_str()));
    worker->Start();
#if defined(OS_ANDROID) || defined(OS_LINUX)
    worker->SetThreadPriority(base::kThreadPriority_Background);
#endif
    workers_.push_back(worker.Pass());
  }
}

TaskGraphRunner::~TaskGraphRunner() {
  {
    base::AutoLock lock(lock_);

    DCHECK_EQ(0u, ready_to_run_namespaces_.size());
    DCHECK_EQ(0u, namespaces_.size());

    DCHECK(!shutdown_);
    shutdown_ = true;

    // Wake up a worker so it knows it should exit. This will cause all
    // workers to exit as each will wake up another worker before exiting.
    has_ready_to_run_tasks_cv_.Signal();
  }

  while (workers_.size()) {
    scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front();
    // http://crbug.com/240453 - Join() is considered IO and will block this
    // thread. See also http://crbug.com/239423 for further ideas.
    base::ThreadRestrictions::ScopedAllowIO allow_io;
    worker->Join();
  }
}

void TaskGraphRunner::Register(const WorkerPool* worker_pool) {
  base::AutoLock lock(lock_);

  DCHECK(namespaces_.find(worker_pool) == namespaces_.end());
  linked_ptr<TaskNamespace> task_set = make_linked_ptr(new TaskNamespace());
  namespaces_[worker_pool] = task_set;
}

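// All tasks for |worker_pool| must have finished running and all completed
// tasks must have been collected before a pool is unregistered, as the
// DCHECKs below verify.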
void TaskGraphRunner::Unregister(const WorkerPool* worker_pool) {
  base::AutoLock lock(lock_);

  DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
  DCHECK_EQ(0u, namespaces_[worker_pool]->pending_tasks.size());
  DCHECK_EQ(0u, namespaces_[worker_pool]->ready_to_run_tasks.size());
  DCHECK_EQ(0u, namespaces_[worker_pool]->running_tasks.size());
  DCHECK_EQ(0u, namespaces_[worker_pool]->completed_tasks.size());

  namespaces_.erase(worker_pool);
}

void TaskGraphRunner::WaitForTasksToFinishRunning(
    const WorkerPool* worker_pool) {
  base::AutoLock lock(lock_);

  DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
  TaskNamespace* task_namespace = namespaces_[worker_pool].get();

  while (!has_finished_running_tasks(task_namespace))
    has_namespaces_with_finished_running_tasks_cv_.Wait();

  // There may be other namespaces that have finished running tasks, so wake
  // up another origin thread.
  has_namespaces_with_finished_running_tasks_cv_.Signal();
}

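// The new pending and running task sets are built as locals so that the old
// sets, and the tasks they still reference, can be destroyed after |lock_|
// has been released (see the note near the bottom of this function).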
void TaskGraphRunner::SetTaskGraph(const WorkerPool* worker_pool,
                                   TaskGraph* graph) {
  TaskGraph new_pending_tasks;
  TaskGraph new_running_tasks;

  new_pending_tasks.swap(*graph);

  {
    base::AutoLock lock(lock_);

    DCHECK(!shutdown_);
    DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
    TaskNamespace* task_namespace = namespaces_[worker_pool].get();

    // First remove all completed tasks from |new_pending_tasks| and
    // adjust the number of dependencies.
    for (TaskVector::iterator it = task_namespace->completed_tasks.begin();
         it != task_namespace->completed_tasks.end(); ++it) {
      internal::WorkerPoolTask* task = it->get();

      scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase(
          task);
      if (node) {
        for (internal::GraphNode::Vector::const_iterator it =
                 node->dependents().begin();
             it != node->dependents().end(); ++it) {
          internal::GraphNode* dependent_node = *it;
          dependent_node->remove_dependency();
        }
      }
    }

    // Build the new running task set.
    for (TaskGraph::iterator it = task_namespace->running_tasks.begin();
         it != task_namespace->running_tasks.end(); ++it) {
      internal::WorkerPoolTask* task = it->first;
      // Transfer the scheduled task value from |new_pending_tasks| to
      // |new_running_tasks| if currently running. The value must be set to
      // NULL if |new_pending_tasks| doesn't contain the task. This does
      // the right thing in both cases.
      new_running_tasks.set(task, new_pending_tasks.take_and_erase(task));
    }

    // Build the new "ready to run" tasks queue.
    task_namespace->ready_to_run_tasks.clear();
    for (TaskGraph::iterator it = new_pending_tasks.begin();
         it != new_pending_tasks.end(); ++it) {
      internal::WorkerPoolTask* task = it->first;
      DCHECK(task);
      internal::GraphNode* node = it->second;

      // Completed tasks should not exist in |new_pending_tasks|.
      DCHECK(!task->HasFinishedRunning());

      // Call DidSchedule() to indicate that this task has been scheduled.
      // Note: This is only for debugging purposes.
      task->DidSchedule();

      if (!node->num_dependencies())
        task_namespace->ready_to_run_tasks.push_back(node);

      // Erase the task from the old pending tasks.
      task_namespace->pending_tasks.erase(task);
    }

    // Rearrange the elements in |ready_to_run_tasks| in such a way that
    // they form a heap.
    std::make_heap(task_namespace->ready_to_run_tasks.begin(),
                   task_namespace->ready_to_run_tasks.end(),
                   CompareTaskPriority);

    task_namespace->completed_tasks.reserve(
        task_namespace->completed_tasks.size() +
        task_namespace->pending_tasks.size());

    // The items left in |pending_tasks| need to be canceled.
    for (TaskGraph::const_iterator it = task_namespace->pending_tasks.begin();
         it != task_namespace->pending_tasks.end(); ++it) {
      task_namespace->completed_tasks.push_back(it->first);
    }

    // Swap task sets.
    // Note: old tasks are intentionally destroyed after releasing |lock_|.
    task_namespace->pending_tasks.swap(new_pending_tasks);
    task_namespace->running_tasks.swap(new_running_tasks);

    // If |ready_to_run_tasks| is empty, it means we either have running
    // tasks, or we have no pending tasks.
    DCHECK(!task_namespace->ready_to_run_tasks.empty() ||
           (task_namespace->pending_tasks.empty() ||
            !task_namespace->running_tasks.empty()));

    // Build the new "ready to run" task namespaces queue.
    ready_to_run_namespaces_.clear();
    for (TaskNamespaceMap::iterator it = namespaces_.begin();
         it != namespaces_.end(); ++it) {
      if (!it->second->ready_to_run_tasks.empty())
        ready_to_run_namespaces_.push_back(it->second.get());
    }

    // Rearrange the task namespaces in |ready_to_run_namespaces_| in such a
    // way that they form a heap.
    std::make_heap(ready_to_run_namespaces_.begin(),
                   ready_to_run_namespaces_.end(),
                   CompareTaskNamespacePriority);

    // If there is more work available, wake up a worker thread.
    if (!ready_to_run_namespaces_.empty())
      has_ready_to_run_tasks_cv_.Signal();
  }
}

void TaskGraphRunner::CollectCompletedTasks(
    const WorkerPool* worker_pool, TaskVector* completed_tasks) {
  base::AutoLock lock(lock_);

  DCHECK_EQ(0u, completed_tasks->size());
  DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
  completed_tasks->swap(namespaces_[worker_pool]->completed_tasks);
}

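// Worker thread entry point. Each iteration picks the highest priority ready
// task across all namespaces, runs it with |lock_| released, and then updates
// its dependents and the ready-to-run heaps.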
void TaskGraphRunner::Run() {
  base::AutoLock lock(lock_);

  // Get a unique thread index.
  int thread_index = next_thread_index_++;

  while (true) {
    if (ready_to_run_namespaces_.empty()) {
      // Exit when shutdown is set and no more tasks are pending.
      if (shutdown_)
        break;

      // Wait for more tasks.
      has_ready_to_run_tasks_cv_.Wait();
      continue;
    }

    // Take the top priority TaskNamespace from |ready_to_run_namespaces_|.
    std::pop_heap(ready_to_run_namespaces_.begin(),
                  ready_to_run_namespaces_.end(),
                  CompareTaskNamespacePriority);
    TaskNamespace* task_namespace = ready_to_run_namespaces_.back();
    ready_to_run_namespaces_.pop_back();
    DCHECK(!task_namespace->ready_to_run_tasks.empty());

    // Take the top priority task from |ready_to_run_tasks|.
    std::pop_heap(task_namespace->ready_to_run_tasks.begin(),
                  task_namespace->ready_to_run_tasks.end(),
                  CompareTaskPriority);
    scoped_refptr<internal::WorkerPoolTask> task(
        task_namespace->ready_to_run_tasks.back()->task());
    task_namespace->ready_to_run_tasks.pop_back();

    // Add the task namespace back to |ready_to_run_namespaces_| if it is
    // not empty after taking the top priority task.
    if (!task_namespace->ready_to_run_tasks.empty()) {
      ready_to_run_namespaces_.push_back(task_namespace);
      std::push_heap(ready_to_run_namespaces_.begin(),
                     ready_to_run_namespaces_.end(),
                     CompareTaskNamespacePriority);
    }

    // Move the task from |pending_tasks| to |running_tasks|.
    DCHECK(task_namespace->pending_tasks.contains(task.get()));
    DCHECK(!task_namespace->running_tasks.contains(task.get()));
    task_namespace->running_tasks.set(
        task.get(),
        task_namespace->pending_tasks.take_and_erase(task.get()));

    // There may be more work available, so wake up another worker thread.
    has_ready_to_run_tasks_cv_.Signal();

    // Call WillRun() before releasing |lock_| and running the task.
    task->WillRun();

    {
      base::AutoUnlock unlock(lock_);

      task->RunOnWorkerThread(thread_index);
    }

    // This will mark the task as finished running.
    task->DidRun();

    // Now iterate over all dependents to remove the dependency and check
    // if they are ready to run.
    scoped_ptr<internal::GraphNode> node =
        task_namespace->running_tasks.take_and_erase(task.get());
    if (node) {
      bool ready_to_run_namespaces_has_heap_properties = true;

      for (internal::GraphNode::Vector::const_iterator it =
               node->dependents().begin();
           it != node->dependents().end(); ++it) {
        internal::GraphNode* dependent_node = *it;

        dependent_node->remove_dependency();
        // A task is ready if it has no dependencies. Add it to
        // |ready_to_run_tasks|.
        if (!dependent_node->num_dependencies()) {
          bool was_empty = task_namespace->ready_to_run_tasks.empty();
          task_namespace->ready_to_run_tasks.push_back(dependent_node);
          std::push_heap(task_namespace->ready_to_run_tasks.begin(),
                         task_namespace->ready_to_run_tasks.end(),
                         CompareTaskPriority);
          // A task namespace is ready if it has at least one ready to run
          // task. Add it to |ready_to_run_namespaces_| if it just became
          // ready.
          if (was_empty) {
            DCHECK(std::find(ready_to_run_namespaces_.begin(),
                             ready_to_run_namespaces_.end(),
                             task_namespace) ==
                   ready_to_run_namespaces_.end());
            ready_to_run_namespaces_.push_back(task_namespace);
          }
          ready_to_run_namespaces_has_heap_properties = false;
        }
      }

      // Rearrange the task namespaces in |ready_to_run_namespaces_| in such
      // a way that they yet again form a heap.
      if (!ready_to_run_namespaces_has_heap_properties) {
        std::make_heap(ready_to_run_namespaces_.begin(),
                       ready_to_run_namespaces_.end(),
                       CompareTaskNamespacePriority);
      }
    }

    // Finally add the task to |completed_tasks|.
    task_namespace->completed_tasks.push_back(task);

    // If the namespace has finished running all tasks, wake up an origin
    // thread.
    if (has_finished_running_tasks(task_namespace))
      has_namespaces_with_finished_running_tasks_cv_.Signal();
  }

  // We noticed we should exit. Wake up the next worker so it knows it should
  // exit as well (because the shutdown code only signals once).
  has_ready_to_run_tasks_cv_.Signal();
}

class CompositorRasterTaskGraphRunner
    : public TaskGraphRunner {
 public:
  CompositorRasterTaskGraphRunner() : TaskGraphRunner(
      WorkerPool::GetNumRasterThreads(), "CompositorRaster") {
  }
};

base::LazyInstance<CompositorRasterTaskGraphRunner>
    g_task_graph_runner = LAZY_INSTANCE_INITIALIZER;

const int kDefaultNumRasterThreads = 1;

int g_num_raster_threads = 0;

}  // namespace

namespace internal {

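// The |did_schedule_|, |did_run_| and |did_complete_| flags track a task's
// lifecycle (schedule -> run -> complete). Within this file they are only
// used for DCHECK-based validation of correct usage.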
WorkerPoolTask::WorkerPoolTask()
    : did_schedule_(false),
      did_run_(false),
      did_complete_(false) {
}

WorkerPoolTask::~WorkerPoolTask() {
  DCHECK_EQ(did_schedule_, did_complete_);
  DCHECK(!did_run_ || did_schedule_);
  DCHECK(!did_run_ || did_complete_);
}

void WorkerPoolTask::DidSchedule() {
  DCHECK(!did_complete_);
  did_schedule_ = true;
}

void WorkerPoolTask::WillRun() {
  DCHECK(did_schedule_);
  DCHECK(!did_complete_);
  DCHECK(!did_run_);
}

void WorkerPoolTask::DidRun() {
  did_run_ = true;
}

void WorkerPoolTask::WillComplete() {
  DCHECK(!did_complete_);
}

void WorkerPoolTask::DidComplete() {
  DCHECK(did_schedule_);
  DCHECK(!did_complete_);
  did_complete_ = true;
}

bool WorkerPoolTask::HasFinishedRunning() const {
  return did_run_;
}

bool WorkerPoolTask::HasCompleted() const {
  return did_complete_;
}

GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority)
    : task_(task),
      priority_(priority),
      num_dependencies_(0) {
}

GraphNode::~GraphNode() {
}

}  // namespace internal

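// Note: to have any effect, SetNumRasterThreads() must be called before the
// first WorkerPool is created; GetNumRasterThreads() latches the default
// value when the shared task graph runner is lazily constructed.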
// static
void WorkerPool::SetNumRasterThreads(int num_threads) {
  DCHECK_LT(0, num_threads);
  DCHECK_EQ(0, g_num_raster_threads);

  g_num_raster_threads = num_threads;
}

// static
int WorkerPool::GetNumRasterThreads() {
  if (!g_num_raster_threads)
    g_num_raster_threads = kDefaultNumRasterThreads;

  return g_num_raster_threads;
}

WorkerPool::WorkerPool() : in_dispatch_completion_callbacks_(false) {
  g_task_graph_runner.Pointer()->Register(this);
}

WorkerPool::~WorkerPool() {
  g_task_graph_runner.Pointer()->Unregister(this);
}

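// Note: Shutdown() only waits for already scheduled tasks to finish running.
// Completed tasks still have to be collected via
// CheckForCompletedWorkerTasks() before the pool is destroyed, since
// TaskGraphRunner::Unregister() expects every task set to be empty.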
void WorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "WorkerPool::Shutdown");

  DCHECK(!in_dispatch_completion_callbacks_);

  g_task_graph_runner.Pointer()->WaitForTasksToFinishRunning(this);
}

void WorkerPool::SetTaskGraph(TaskGraph* graph) {
  TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph",
               "num_tasks", graph->size());

  DCHECK(!in_dispatch_completion_callbacks_);

  g_task_graph_runner.Pointer()->SetTaskGraph(this, graph);
}

void WorkerPool::CheckForCompletedWorkerTasks() {
  TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedWorkerTasks");

  DCHECK(!in_dispatch_completion_callbacks_);

  TaskVector completed_tasks;
  g_task_graph_runner.Pointer()->CollectCompletedTasks(this, &completed_tasks);
  ProcessCompletedTasks(completed_tasks);
}

void WorkerPool::ProcessCompletedTasks(
    const TaskVector& completed_tasks) {
  TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks",
               "completed_task_count", completed_tasks.size());

  // The worker pool instance is not reentrant while processing completed
  // tasks.
  in_dispatch_completion_callbacks_ = true;

  for (TaskVector::const_iterator it = completed_tasks.begin();
       it != completed_tasks.end();
       ++it) {
    internal::WorkerPoolTask* task = it->get();

    task->WillComplete();
    task->CompleteOnOriginThread();
    task->DidComplete();
  }

  in_dispatch_completion_callbacks_ = false;
}

}  // namespace cc