Chromium Code Reviews

Side by Side Diff: cc/resources/worker_pool.cc

Issue 73923003: Shared Raster Worker Threads (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Number of raster threads API comments (created 6 years, 11 months ago)
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/resources/worker_pool.h" 5 #include "cc/resources/worker_pool.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/bind.h" 9 #include "base/bind.h"
10 #include "base/containers/hash_tables.h" 10 #include "base/containers/hash_tables.h"
11 #include "base/debug/trace_event.h" 11 #include "base/debug/trace_event.h"
12 #include "base/lazy_instance.h"
13 #include "base/memory/linked_ptr.h"
12 #include "base/strings/stringprintf.h" 14 #include "base/strings/stringprintf.h"
13 #include "base/synchronization/condition_variable.h" 15 #include "base/synchronization/condition_variable.h"
14 #include "base/threading/simple_thread.h" 16 #include "base/threading/simple_thread.h"
15 #include "base/threading/thread_restrictions.h" 17 #include "base/threading/thread_restrictions.h"
16 #include "cc/base/scoped_ptr_deque.h" 18 #include "cc/base/scoped_ptr_deque.h"
19 #include "cc/base/switches.h"
17 20
18 namespace cc { 21 namespace cc {
19 22
20 namespace internal { 23 namespace {
21 24
22 WorkerPoolTask::WorkerPoolTask() 25 // TaskGraphRunners can process task graphs from multiple
23 : did_schedule_(false), 26 // WorkerPool instances. All members are guarded by |lock_|.
24 did_run_(false), 27 class TaskGraphRunner : public base::DelegateSimpleThread::Delegate {
25 did_complete_(false) { 28 public:
26 } 29 typedef WorkerPool::TaskGraph TaskGraph;
30 typedef WorkerPool::TaskVector TaskVector;
27 31
28 WorkerPoolTask::~WorkerPoolTask() { 32 TaskGraphRunner(size_t num_threads, const std::string& thread_name_prefix);
29 DCHECK_EQ(did_schedule_, did_complete_); 33 virtual ~TaskGraphRunner();
30 DCHECK(!did_run_ || did_schedule_);
31 DCHECK(!did_run_ || did_complete_);
32 }
33 34
34 void WorkerPoolTask::DidSchedule() { 35 void Register(const WorkerPool* worker_pool);
35 DCHECK(!did_complete_); 36 void Unregister(const WorkerPool* worker_pool);
36 did_schedule_ = true;
37 }
38
39 void WorkerPoolTask::WillRun() {
40 DCHECK(did_schedule_);
41 DCHECK(!did_complete_);
42 DCHECK(!did_run_);
43 }
44
45 void WorkerPoolTask::DidRun() {
46 did_run_ = true;
47 }
48
49 void WorkerPoolTask::WillComplete() {
50 DCHECK(!did_complete_);
51 }
52
53 void WorkerPoolTask::DidComplete() {
54 DCHECK(did_schedule_);
55 DCHECK(!did_complete_);
56 did_complete_ = true;
57 }
58
59 bool WorkerPoolTask::HasFinishedRunning() const {
60 return did_run_;
61 }
62
63 bool WorkerPoolTask::HasCompleted() const {
64 return did_complete_;
65 }
66
67 GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority)
68 : task_(task),
69 priority_(priority),
70 num_dependencies_(0) {
71 }
72
73 GraphNode::~GraphNode() {
74 }
75
76 } // namespace internal
77
78 // Internal to the worker pool. Any data or logic that needs to be
79 // shared between threads lives in this class. All members are guarded
80 // by |lock_|.
81 class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
82 public:
83 Inner(size_t num_threads, const std::string& thread_name_prefix);
84 virtual ~Inner();
85
86 void Shutdown();
87
88 // Schedule running of tasks in |graph|. Tasks previously scheduled but 37 // Schedule running of tasks in |graph|. Tasks previously scheduled but
89 // no longer needed will be canceled unless already running. Canceled 38 // no longer needed will be canceled unless already running. Canceled
90 // tasks are moved to |completed_tasks_| without being run. The result 39 // tasks are moved to |completed_tasks| without being run. The result
91 // is that once scheduled, a task is guaranteed to end up in the 40 // is that once scheduled, a task is guaranteed to end up in the
92 // |completed_tasks_| queue even if they later get canceled by another 41 // |completed_tasks| queue even if it later gets canceled by another
93 // call to SetTaskGraph(). 42 // call to SetTaskGraph().
94 void SetTaskGraph(TaskGraph* graph); 43 void SetTaskGraph(const WorkerPool* worker_pool, TaskGraph* graph);
44
45 // Wait for all scheduled tasks to finish running.
46 void WaitForTasksToFinishRunning(const WorkerPool* worker_pool);
95 47
96 // Collect all completed tasks in |completed_tasks|. 48 // Collect all completed tasks in |completed_tasks|.
97 void CollectCompletedTasks(TaskVector* completed_tasks); 49 void CollectCompletedTasks(const WorkerPool* worker_pool,
50 TaskVector* completed_tasks);
98 51
99 private: 52 private:
100 static bool CompareTaskPriority(const internal::GraphNode* a, 53 static bool CompareTaskPriority(const internal::GraphNode* a,
101 const internal::GraphNode* b) { 54 const internal::GraphNode* b) {
102 // In this system, numerically lower priority is run first. 55 // In this system, numerically lower priority is run first.
103 if (a->priority() != b->priority()) 56 if (a->priority() != b->priority())
104 return a->priority() > b->priority(); 57 return a->priority() > b->priority();
105 58
106 // Run task with most dependents first when priority is the same. 59 // Run task with most dependents first when priority is the same.
107 return a->dependents().size() < b->dependents().size(); 60 return a->dependents().size() < b->dependents().size();
108 } 61 }
109 62
63 struct TaskNamespace {
64 // This set contains all pending tasks.
65 TaskGraph pending_tasks;
66 // This set contains all currently running tasks.
67 TaskGraph running_tasks;
68 // Completed tasks not yet collected by origin thread.
69 TaskVector completed_tasks;
70 // Ordered set of tasks that are ready to run.
71 internal::GraphNode::Vector ready_to_run_tasks;
72 };
73
74 static bool CompareTaskNamespacePriority(const TaskNamespace* a,
75 const TaskNamespace* b) {
76 DCHECK(!a->ready_to_run_tasks.empty());
77 DCHECK(!a->ready_to_run_tasks.empty());
vmpstr 2014/01/06 20:46:31 typo: b->...
78 return CompareTaskPriority(a->ready_to_run_tasks.back(),
vmpstr 2014/01/06 20:46:31 According to make_heap docs, the max element in a
reveman 2014/01/06 21:59:39 I'm usually not a fan of putting the type in the v
vmpstr 2014/01/06 22:07:20 I think a comment here is fine. (and possibly anyw
sohanjg 2014/01/07 08:35:23 Done.
79 b->ready_to_run_tasks.back());
80 }
81
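Note on the heap convention used by the two comparators above (an aside, not part of the patch): both are "less than" predicates for the std::*_heap algorithms, so returning a->priority() > b->priority() makes the numerically lowest priority the comparator-maximal element, and per the make_heap behavior vmpstr cites below, that maximal element sits at front() of the vector; back() only holds the top in the brief window after std::pop_heap. A minimal self-contained sketch, with a hypothetical Node struct standing in for internal::GraphNode:

#include <algorithm>
#include <cassert>
#include <vector>

namespace {

// Hypothetical stand-in for internal::GraphNode, holding only the fields
// the comparator inspects.
struct Node {
  unsigned priority;
  size_t num_dependents;
};

// Mirrors CompareTaskPriority: numerically lower priority runs first,
// ties broken in favor of more dependents.
bool CompareNodePriority(const Node& a, const Node& b) {
  if (a.priority != b.priority)
    return a.priority > b.priority;
  return a.num_dependents < b.num_dependents;
}

}  // namespace

int main() {
  std::vector<Node> ready = {{3, 0}, {1, 2}, {2, 5}, {1, 7}};
  std::make_heap(ready.begin(), ready.end(), CompareNodePriority);

  // After make_heap, the comparator-maximal element, i.e. the node that
  // should run first (lowest priority value, most dependents), is at
  // front(); back() is an arbitrary element of the heap.
  assert(ready.front().priority == 1 && ready.front().num_dependents == 7);

  // Only pop_heap moves the top to back(), where Run() picks it up before
  // calling pop_back().
  std::pop_heap(ready.begin(), ready.end(), CompareNodePriority);
  assert(ready.back().priority == 1 && ready.back().num_dependents == 7);
  return 0;
}
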
82 typedef std::map<const WorkerPool*, linked_ptr<TaskNamespace> >
83 TaskNamespaceMap;
84
110 // Overridden from base::DelegateSimpleThread: 85 // Overridden from base::DelegateSimpleThread:
111 virtual void Run() OVERRIDE; 86 virtual void Run() OVERRIDE;
112 87
88 inline bool has_finished_running_tasks(TaskNamespace* task_namespace) {
89 return (task_namespace->pending_tasks.empty() &&
90 task_namespace->running_tasks.empty());
91 }
92
113 // This lock protects all members of this class except 93 // This lock protects all members of this class except
114 // |worker_pool_on_origin_thread_|. Do not read or modify anything 94 // |worker_pool_on_origin_thread_|. Do not read or modify anything
115 // without holding this lock. Do not block while holding this lock. 95 // without holding this lock. Do not block while holding this lock.
116 mutable base::Lock lock_; 96 mutable base::Lock lock_;
117 97
118 // Condition variable that is waited on by worker threads until new 98 // Condition variable that is waited on by worker threads until new
119 // tasks are ready to run or shutdown starts. 99 // tasks are ready to run or shutdown starts.
120 base::ConditionVariable has_ready_to_run_tasks_cv_; 100 base::ConditionVariable has_ready_to_run_tasks_cv_;
121 101
102 // Condition variable that is waited on by origin threads until a
103 // namespace has finished running all associated tasks.
104 base::ConditionVariable has_namespaces_with_finished_running_tasks_cv_;
105
122 // Provides each running thread loop with a unique index. First thread 106 // Provides each running thread loop with a unique index. First thread
123 // loop index is 0. 107 // loop index is 0.
124 unsigned next_thread_index_; 108 unsigned next_thread_index_;
125 109
126 // Set during shutdown. Tells workers to exit when no more tasks 110 // Set during shutdown. Tells workers to exit when no more tasks
127 // are pending. 111 // are pending.
128 bool shutdown_; 112 bool shutdown_;
129 113
130 // This set contains all pending tasks. 114 // This set contains all registered namespaces.
131 GraphNodeMap pending_tasks_; 115 TaskNamespaceMap namespaces_;
132 116
133 // Priority queue containing tasks that are ready to run. 117 // Ordered set of task namespaces that have ready to run tasks.
134 internal::GraphNode::Vector ready_to_run_tasks_; 118 std::vector<TaskNamespace*> ready_to_run_namespaces_;
135
136 // This set contains all currently running tasks.
137 GraphNodeMap running_tasks_;
138
139 // Completed tasks not yet collected by origin thread.
140 TaskVector completed_tasks_;
141 119
142 ScopedPtrDeque<base::DelegateSimpleThread> workers_; 120 ScopedPtrDeque<base::DelegateSimpleThread> workers_;
143 121
144 DISALLOW_COPY_AND_ASSIGN(Inner); 122 DISALLOW_COPY_AND_ASSIGN(TaskGraphRunner);
145 }; 123 };
146 124
147 WorkerPool::Inner::Inner( 125 TaskGraphRunner::TaskGraphRunner(
148 size_t num_threads, const std::string& thread_name_prefix) 126 size_t num_threads, const std::string& thread_name_prefix)
149 : lock_(), 127 : lock_(),
150 has_ready_to_run_tasks_cv_(&lock_), 128 has_ready_to_run_tasks_cv_(&lock_),
129 has_namespaces_with_finished_running_tasks_cv_(&lock_),
151 next_thread_index_(0), 130 next_thread_index_(0),
152 shutdown_(false) { 131 shutdown_(false) {
153 base::AutoLock lock(lock_); 132 base::AutoLock lock(lock_);
154 133
155 while (workers_.size() < num_threads) { 134 while (workers_.size() < num_threads) {
156 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( 135 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr(
157 new base::DelegateSimpleThread( 136 new base::DelegateSimpleThread(
158 this, 137 this,
159 thread_name_prefix + 138 thread_name_prefix +
160 base::StringPrintf( 139 base::StringPrintf(
161 "Worker%u", 140 "Worker%u",
162 static_cast<unsigned>(workers_.size() + 1)).c_str())); 141 static_cast<unsigned>(workers_.size() + 1)).c_str()));
163 worker->Start(); 142 worker->Start();
164 #if defined(OS_ANDROID) || defined(OS_LINUX) 143 #if defined(OS_ANDROID) || defined(OS_LINUX)
165 worker->SetThreadPriority(base::kThreadPriority_Background); 144 worker->SetThreadPriority(base::kThreadPriority_Background);
166 #endif 145 #endif
167 workers_.push_back(worker.Pass()); 146 workers_.push_back(worker.Pass());
168 } 147 }
169 } 148 }
170 149
171 WorkerPool::Inner::~Inner() { 150 TaskGraphRunner::~TaskGraphRunner() {
172 base::AutoLock lock(lock_);
173
174 DCHECK(shutdown_);
175
176 DCHECK_EQ(0u, pending_tasks_.size());
177 DCHECK_EQ(0u, ready_to_run_tasks_.size());
178 DCHECK_EQ(0u, running_tasks_.size());
179 DCHECK_EQ(0u, completed_tasks_.size());
180 }
181
182 void WorkerPool::Inner::Shutdown() {
183 { 151 {
184 base::AutoLock lock(lock_); 152 base::AutoLock lock(lock_);
185 153
154 DCHECK_EQ(0u, ready_to_run_namespaces_.size());
155 DCHECK_EQ(0u, namespaces_.size());
156
186 DCHECK(!shutdown_); 157 DCHECK(!shutdown_);
187 shutdown_ = true; 158 shutdown_ = true;
188 159
189 // Wake up a worker so it knows it should exit. This will cause all workers 160 // Wake up a worker so it knows it should exit. This will cause all workers
190 // to exit as each will wake up another worker before exiting. 161 // to exit as each will wake up another worker before exiting.
191 has_ready_to_run_tasks_cv_.Signal(); 162 has_ready_to_run_tasks_cv_.Signal();
192 } 163 }
193 164
194 while (workers_.size()) { 165 while (workers_.size()) {
195 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front(); 166 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front();
196 // http://crbug.com/240453 - Join() is considered IO and will block this 167 // http://crbug.com/240453 - Join() is considered IO and will block this
197 // thread. See also http://crbug.com/239423 for further ideas. 168 // thread. See also http://crbug.com/239423 for further ideas.
198 base::ThreadRestrictions::ScopedAllowIO allow_io; 169 base::ThreadRestrictions::ScopedAllowIO allow_io;
199 worker->Join(); 170 worker->Join();
200 } 171 }
201 } 172 }
202 173
203 void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { 174 void TaskGraphRunner::Register(const WorkerPool* worker_pool) {
204 // It is OK to call SetTaskGraph() after shutdown if |graph| is empty. 175 base::AutoLock lock(lock_);
205 DCHECK(graph->empty() || !shutdown_);
206 176
207 GraphNodeMap new_pending_tasks; 177 DCHECK(namespaces_.find(worker_pool) == namespaces_.end());
208 GraphNodeMap new_running_tasks; 178 linked_ptr<TaskNamespace> task_set = make_linked_ptr(new TaskNamespace());
vmpstr 2014/01/06 20:46:31 Sorry if this is a silly question, but why linked_
reveman 2014/01/06 21:59:39 linked_ptr is preferred in this case as we'll neve
sohanjg 2014/01/07 08:35:23 We used linked_ptr instead of ScopedPtrHashMap bec
179 namespaces_[worker_pool] = task_set;
180 }
181
182 void TaskGraphRunner::Unregister(const WorkerPool* worker_pool) {
183 base::AutoLock lock(lock_);
184
185 DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
186 DCHECK_EQ(0u, namespaces_[worker_pool]->pending_tasks.size());
187 DCHECK_EQ(0u, namespaces_[worker_pool]->ready_to_run_tasks.size());
188 DCHECK_EQ(0u, namespaces_[worker_pool]->running_tasks.size());
189 DCHECK_EQ(0u, namespaces_[worker_pool]->completed_tasks.size());
190
191 namespaces_.erase(worker_pool);
192 }
193
194 void TaskGraphRunner::WaitForTasksToFinishRunning(
195 const WorkerPool* worker_pool) {
196 base::AutoLock lock(lock_);
197
198 DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
199 TaskNamespace* task_namespace = namespaces_[worker_pool].get();
200
201 while (!has_finished_running_tasks(task_namespace))
202 has_namespaces_with_finished_running_tasks_cv_.Wait();
203
204 // There may be other namespaces that have finished running
205 // tasks, so wake up another origin thread.
206 has_namespaces_with_finished_running_tasks_cv_.Signal();
207 }
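The wait/signal structure of WaitForTasksToFinishRunning, waiting on a predicate in a loop and then re-signaling so another origin thread blocked on the same condition variable gets to check its own namespace, can be sketched with standard primitives as follows (illustration only; the patch uses base::Lock and base::ConditionVariable, and the Shared struct here is made up):

#include <condition_variable>
#include <mutex>
#include <thread>

// Simplified shared state guarded by |lock|; in the patch this is the
// per-namespace pending/running task sets guarded by |lock_|.
struct Shared {
  std::mutex lock;
  std::condition_variable finished_cv;
  int tasks_left = 2;
};

void WaitForTasksToFinishRunning(Shared* shared) {
  std::unique_lock<std::mutex> hold(shared->lock);
  while (shared->tasks_left > 0)
    shared->finished_cv.wait(hold);
  // Another waiter may also be satisfied now; wake one more so a single
  // notify from a worker is never "used up" by the wrong origin thread.
  shared->finished_cv.notify_one();
}

int main() {
  Shared shared;
  std::thread waiter1(WaitForTasksToFinishRunning, &shared);
  std::thread waiter2(WaitForTasksToFinishRunning, &shared);

  {
    // Worker side: finish the remaining tasks and signal once, as Run()
    // does with has_namespaces_with_finished_running_tasks_cv_.Signal().
    std::lock_guard<std::mutex> hold(shared.lock);
    shared.tasks_left = 0;
  }
  shared.finished_cv.notify_one();

  waiter1.join();
  waiter2.join();
  return 0;
}
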
208
209 void TaskGraphRunner::SetTaskGraph(const WorkerPool* worker_pool,
210 TaskGraph* graph) {
211 TaskGraph new_pending_tasks;
212 TaskGraph new_running_tasks;
209 213
210 new_pending_tasks.swap(*graph); 214 new_pending_tasks.swap(*graph);
211 215
212 { 216 {
213 base::AutoLock lock(lock_); 217 base::AutoLock lock(lock_);
214 218
219 DCHECK(!shutdown_);
220 DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
221 TaskNamespace* task_namespace = namespaces_[worker_pool].get();
222
215 // First remove all completed tasks from |new_pending_tasks| and 223 // First remove all completed tasks from |new_pending_tasks| and
216 // adjust number of dependencies. 224 // adjust number of dependencies.
217 for (TaskVector::iterator it = completed_tasks_.begin(); 225 for (TaskVector::iterator it = task_namespace->completed_tasks.begin();
218 it != completed_tasks_.end(); ++it) { 226 it != task_namespace->completed_tasks.end(); ++it) {
219 internal::WorkerPoolTask* task = it->get(); 227 internal::WorkerPoolTask* task = it->get();
220 228
221 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( 229 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase(
222 task); 230 task);
223 if (node) { 231 if (node) {
224 for (internal::GraphNode::Vector::const_iterator it = 232 for (internal::GraphNode::Vector::const_iterator it =
225 node->dependents().begin(); 233 node->dependents().begin();
226 it != node->dependents().end(); ++it) { 234 it != node->dependents().end(); ++it) {
227 internal::GraphNode* dependent_node = *it; 235 internal::GraphNode* dependent_node = *it;
228 dependent_node->remove_dependency(); 236 dependent_node->remove_dependency();
229 } 237 }
230 } 238 }
231 } 239 }
232 240
233 // Build new running task set. 241 // Build new running task set.
234 for (GraphNodeMap::iterator it = running_tasks_.begin(); 242 for (TaskGraph::iterator it = task_namespace->running_tasks.begin();
235 it != running_tasks_.end(); ++it) { 243 it != task_namespace->running_tasks.end(); ++it) {
236 internal::WorkerPoolTask* task = it->first; 244 internal::WorkerPoolTask* task = it->first;
237 // Transfer scheduled task value from |new_pending_tasks| to 245 // Transfer scheduled task value from |new_pending_tasks| to
238 // |new_running_tasks| if currently running. Value must be set to 246 // |new_running_tasks| if currently running. Value must be set to
239 // NULL if |new_pending_tasks| doesn't contain task. This does 247 // NULL if |new_pending_tasks| doesn't contain task. This does
240 // the right thing in both cases. 248 // the right thing in both cases.
241 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); 249 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task));
242 } 250 }
243 251
244 // Build new "ready to run" tasks queue. 252 // Build new "ready to run" tasks queue.
245 ready_to_run_tasks_.clear(); 253 task_namespace->ready_to_run_tasks.clear();
246 for (GraphNodeMap::iterator it = new_pending_tasks.begin(); 254 for (TaskGraph::iterator it = new_pending_tasks.begin();
247 it != new_pending_tasks.end(); ++it) { 255 it != new_pending_tasks.end(); ++it) {
248 internal::WorkerPoolTask* task = it->first; 256 internal::WorkerPoolTask* task = it->first;
249 DCHECK(task); 257 DCHECK(task);
250 internal::GraphNode* node = it->second; 258 internal::GraphNode* node = it->second;
251 259
252 // Completed tasks should not exist in |new_pending_tasks|. 260 // Completed tasks should not exist in |new_pending_tasks|.
253 DCHECK(!task->HasFinishedRunning()); 261 DCHECK(!task->HasFinishedRunning());
254 262
255 // Call DidSchedule() to indicate that this task has been scheduled. 263 // Call DidSchedule() to indicate that this task has been scheduled.
256 // Note: This is only for debugging purposes. 264 // Note: This is only for debugging purposes.
257 task->DidSchedule(); 265 task->DidSchedule();
258 266
259 if (!node->num_dependencies()) 267 if (!node->num_dependencies())
260 ready_to_run_tasks_.push_back(node); 268 task_namespace->ready_to_run_tasks.push_back(node);
261 269
262 // Erase the task from old pending tasks. 270 // Erase the task from old pending tasks.
263 pending_tasks_.erase(task); 271 task_namespace->pending_tasks.erase(task);
264 } 272 }
265 273
266 // Rearrange the elements in |ready_to_run_tasks_| in such a way that 274 // Rearrange the elements in |ready_to_run_tasks| in such a way that
267 // they form a heap. 275 // they form a heap.
268 std::make_heap(ready_to_run_tasks_.begin(), 276 std::make_heap(task_namespace->ready_to_run_tasks.begin(),
269 ready_to_run_tasks_.end(), 277 task_namespace->ready_to_run_tasks.end(),
270 CompareTaskPriority); 278 CompareTaskPriority);
271 279
272 completed_tasks_.reserve(completed_tasks_.size() + pending_tasks_.size()); 280 task_namespace->completed_tasks.reserve(
281 task_namespace->completed_tasks.size() +
282 task_namespace->pending_tasks.size());
273 283
274 // The items left in |pending_tasks_| need to be canceled. 284 // The items left in |pending_tasks| need to be canceled.
275 for (GraphNodeMap::const_iterator it = pending_tasks_.begin(); 285 for (TaskGraph::const_iterator it = task_namespace->pending_tasks.begin();
276 it != pending_tasks_.end(); 286 it != task_namespace->pending_tasks.end(); ++it) {
277 ++it) { 287 task_namespace->completed_tasks.push_back(it->first);
278 completed_tasks_.push_back(it->first);
279 } 288 }
280 289
281 // Swap task sets. 290 // Swap task sets.
282 // Note: old tasks are intentionally destroyed after releasing |lock_|. 291 // Note: old tasks are intentionally destroyed after releasing |lock_|.
283 pending_tasks_.swap(new_pending_tasks); 292 task_namespace->pending_tasks.swap(new_pending_tasks);
284 running_tasks_.swap(new_running_tasks); 293 task_namespace->running_tasks.swap(new_running_tasks);
285 294
286 // If |ready_to_run_tasks_| is empty, it means we either have 295 // If |ready_to_run_tasks| is empty, it means we either have
287 // running tasks, or we have no pending tasks. 296 // running tasks, or we have no pending tasks.
288 DCHECK(!ready_to_run_tasks_.empty() || 297 DCHECK(!task_namespace->ready_to_run_tasks.empty() ||
289 (pending_tasks_.empty() || !running_tasks_.empty())); 298 (task_namespace->pending_tasks.empty() ||
299 !task_namespace->running_tasks.empty()));
300
301 // Build new "ready to run" task namespaces queue.
302 ready_to_run_namespaces_.clear();
303 for (TaskNamespaceMap::iterator it = namespaces_.begin();
304 it != namespaces_.end(); ++it) {
305 if (!it->second->ready_to_run_tasks.empty())
306 ready_to_run_namespaces_.push_back(it->second.get());
307 }
308
309 // Rearrange the task namespaces in |ready_to_run_namespaces_|
310 // in such a way that they form a heap.
311 std::make_heap(ready_to_run_namespaces_.begin(),
312 ready_to_run_namespaces_.end(),
313 CompareTaskNamespacePriority);
290 314
291 // If there is more work available, wake up worker thread. 315 // If there is more work available, wake up worker thread.
292 if (!ready_to_run_tasks_.empty()) 316 if (!ready_to_run_namespaces_.empty())
293 has_ready_to_run_tasks_cv_.Signal(); 317 has_ready_to_run_tasks_cv_.Signal();
294 } 318 }
295 } 319 }
296 320
297 void WorkerPool::Inner::CollectCompletedTasks(TaskVector* completed_tasks) { 321 void TaskGraphRunner::CollectCompletedTasks(
322 const WorkerPool* worker_pool, TaskVector* completed_tasks) {
298 base::AutoLock lock(lock_); 323 base::AutoLock lock(lock_);
299 324
300 DCHECK_EQ(0u, completed_tasks->size()); 325 DCHECK_EQ(0u, completed_tasks->size());
301 completed_tasks->swap(completed_tasks_); 326 DCHECK(namespaces_.find(worker_pool) != namespaces_.end());
327 completed_tasks->swap(namespaces_[worker_pool]->completed_tasks);
302 } 328 }
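CollectCompletedTasks hands back the whole batch with a single vector swap while holding |lock_|, so the completion callbacks later run with no lock held. The pattern stripped to its essentials (illustration only; Collector is a made-up type):

#include <cassert>
#include <mutex>
#include <vector>

typedef std::vector<int> TaskVector;  // Simplified; really scoped_refptr tasks.

class Collector {
 public:
  void TaskCompleted(int task) {
    std::lock_guard<std::mutex> hold(lock_);
    completed_tasks_.push_back(task);
  }

  // O(1) under the lock: swap out the accumulated batch, then let the
  // caller process it without holding the lock.
  void CollectCompletedTasks(TaskVector* completed_tasks) {
    std::lock_guard<std::mutex> hold(lock_);
    assert(completed_tasks->empty());
    completed_tasks->swap(completed_tasks_);
  }

 private:
  std::mutex lock_;
  TaskVector completed_tasks_;
};

int main() {
  Collector collector;
  collector.TaskCompleted(1);
  collector.TaskCompleted(2);

  TaskVector completed;
  collector.CollectCompletedTasks(&completed);
  assert(completed.size() == 2);
  return 0;
}
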
303 329
304 void WorkerPool::Inner::Run() { 330 void TaskGraphRunner::Run() {
305 base::AutoLock lock(lock_); 331 base::AutoLock lock(lock_);
306 332
307 // Get a unique thread index. 333 // Get a unique thread index.
308 int thread_index = next_thread_index_++; 334 int thread_index = next_thread_index_++;
309 335
310 while (true) { 336 while (true) {
311 if (ready_to_run_tasks_.empty()) { 337 if (ready_to_run_namespaces_.empty()) {
312 // Exit when shutdown is set and no more tasks are pending. 338 // Exit when shutdown is set and no more tasks are pending.
313 if (shutdown_ && pending_tasks_.empty()) 339 if (shutdown_)
314 break; 340 break;
315 341
316 // Wait for more tasks. 342 // Wait for more tasks.
317 has_ready_to_run_tasks_cv_.Wait(); 343 has_ready_to_run_tasks_cv_.Wait();
318 continue; 344 continue;
319 } 345 }
320 346
321 // Take top priority task from |ready_to_run_tasks_|. 347 // Take top priority TaskNamespace from |ready_to_run_namespaces_|.
322 std::pop_heap(ready_to_run_tasks_.begin(), 348 std::pop_heap(ready_to_run_namespaces_.begin(),
323 ready_to_run_tasks_.end(), 349 ready_to_run_namespaces_.end(),
350 CompareTaskNamespacePriority);
351 TaskNamespace* task_namespace = ready_to_run_namespaces_.back();
352 ready_to_run_namespaces_.pop_back();
353 DCHECK(!task_namespace->ready_to_run_tasks.empty());
354
355 // Take top priority task from |ready_to_run_tasks|.
356 std::pop_heap(task_namespace->ready_to_run_tasks.begin(),
357 task_namespace->ready_to_run_tasks.end(),
324 CompareTaskPriority); 358 CompareTaskPriority);
325 scoped_refptr<internal::WorkerPoolTask> task( 359 scoped_refptr<internal::WorkerPoolTask> task(
326 ready_to_run_tasks_.back()->task()); 360 task_namespace->ready_to_run_tasks.back()->task());
327 ready_to_run_tasks_.pop_back(); 361 task_namespace->ready_to_run_tasks.pop_back();
328 362
329 // Move task from |pending_tasks_| to |running_tasks_|. 363 // Add task namespace back to |ready_to_run_namespaces_| if not
330 DCHECK(pending_tasks_.contains(task.get())); 364 // empty after taking top priority task.
331 DCHECK(!running_tasks_.contains(task.get())); 365 if (!task_namespace->ready_to_run_tasks.empty()) {
332 running_tasks_.set(task.get(), pending_tasks_.take_and_erase(task.get())); 366 ready_to_run_namespaces_.push_back(task_namespace);
367 std::push_heap(ready_to_run_namespaces_.begin(),
368 ready_to_run_namespaces_.end(),
369 CompareTaskNamespacePriority);
370 }
371
372 // Move task from |pending_tasks| to |running_tasks|.
373 DCHECK(task_namespace->pending_tasks.contains(task.get()));
374 DCHECK(!task_namespace->running_tasks.contains(task.get()));
375 task_namespace->running_tasks.set(
376 task.get(),
377 task_namespace->pending_tasks.take_and_erase(task.get()));
333 378
334 // There may be more work available, so wake up another worker thread. 379 // There may be more work available, so wake up another worker thread.
335 has_ready_to_run_tasks_cv_.Signal(); 380 has_ready_to_run_tasks_cv_.Signal();
336 381
337 // Call WillRun() before releasing |lock_| and running task. 382 // Call WillRun() before releasing |lock_| and running task.
338 task->WillRun(); 383 task->WillRun();
339 384
340 { 385 {
341 base::AutoUnlock unlock(lock_); 386 base::AutoUnlock unlock(lock_);
342 387
343 task->RunOnWorkerThread(thread_index); 388 task->RunOnWorkerThread(thread_index);
344 } 389 }
345 390
346 // This will mark task as finished running. 391 // This will mark task as finished running.
347 task->DidRun(); 392 task->DidRun();
348 393
349 // Now iterate over all dependents to remove dependency and check 394 // Now iterate over all dependents to remove dependency and check
350 // if they are ready to run. 395 // if they are ready to run.
351 scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase( 396 scoped_ptr<internal::GraphNode> node =
352 task.get()); 397 task_namespace->running_tasks.take_and_erase(task.get());
353 if (node) { 398 if (node) {
reveman 2014/01/06 21:59:39 how about this here: bool ready_to_run_namespaces_
sohanjg 2014/01/07 08:35:23 Done.
354 for (internal::GraphNode::Vector::const_iterator it = 399 for (internal::GraphNode::Vector::const_iterator it =
355 node->dependents().begin(); 400 node->dependents().begin();
356 it != node->dependents().end(); ++it) { 401 it != node->dependents().end(); ++it) {
357 internal::GraphNode* dependent_node = *it; 402 internal::GraphNode* dependent_node = *it;
358 403
359 dependent_node->remove_dependency(); 404 dependent_node->remove_dependency();
360 // Task is ready if it has no dependencies. Add it to 405 // Task is ready if it has no dependencies. Add it to
361 // |ready_to_run_tasks_|. 406 // |ready_to_run_tasks_|.
362 if (!dependent_node->num_dependencies()) { 407 if (!dependent_node->num_dependencies()) {
363 ready_to_run_tasks_.push_back(dependent_node); 408 bool was_empty = task_namespace->ready_to_run_tasks.empty();
364 std::push_heap(ready_to_run_tasks_.begin(), 409 task_namespace->ready_to_run_tasks.push_back(dependent_node);
365 ready_to_run_tasks_.end(), 410 std::push_heap(task_namespace->ready_to_run_tasks.begin(),
411 task_namespace->ready_to_run_tasks.end(),
366 CompareTaskPriority); 412 CompareTaskPriority);
413 // Task namespace is ready if it has at least one ready
414 // to run task. Add it to |ready_to_run_namespaces_| if
415 // it just became ready.
416 if (was_empty) {
417 DCHECK(std::find(ready_to_run_namespaces_.begin(),
418 ready_to_run_namespaces_.end(),
419 task_namespace) ==
420 ready_to_run_namespaces_.end());
421 ready_to_run_namespaces_.push_back(task_namespace);
422 std::push_heap(ready_to_run_namespaces_.begin(),
vmpstr 2014/01/06 20:46:31 I think this has the same problem as before with t
reveman 2014/01/06 21:59:39 Oh, I completely missed that this was never fixed.
vmpstr 2014/01/06 22:07:20 Sounds good to me.
sohanjg 2014/01/07 08:35:23 Done.
423 ready_to_run_namespaces_.end(),
424 CompareTaskNamespacePriority);
425 }
367 } 426 }
368 } 427 }
reveman 2014/01/06 21:59:39 and this here: if (!ready_to_run_namespaces_has_he
sohanjg 2014/01/07 08:35:23 Done.
369 } 428 }
370 429
371 // Finally add task to |completed_tasks_|. 430 // Finally add task to |completed_tasks_|.
372 completed_tasks_.push_back(task); 431 task_namespace->completed_tasks.push_back(task);
432
433 // If namespace has finished running all tasks, wake up origin thread.
434 if (has_finished_running_tasks(task_namespace))
435 has_namespaces_with_finished_running_tasks_cv_.Signal();
373 } 436 }
374 437
375 // We noticed we should exit. Wake up the next worker so it knows it should 438 // We noticed we should exit. Wake up the next worker so it knows it should
376 // exit as well (because the Shutdown() code only signals once). 439 // exit as well (because the Shutdown() code only signals once).
377 has_ready_to_run_tasks_cv_.Signal(); 440 has_ready_to_run_tasks_cv_.Signal();
378 } 441 }
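Run() above implements a two-level scheduler: an outer heap of namespaces that currently have runnable work, and an inner per-namespace heap of ready tasks; a namespace is popped, its top task is taken, and the namespace is pushed back only if it still has ready tasks. A compact sketch of that take/reinsert step with plain ints as tasks (illustration only, no locking or dependency tracking; the namespace comparator here keys off the inner heap's front(), in line with vmpstr's make_heap note, which may differ from the patch's eventual fix):

#include <algorithm>
#include <cstdio>
#include <vector>

struct TaskNamespace {
  std::vector<int> ready_to_run_tasks;  // Kept as a heap; lower runs first.
};

// Lower value runs first, so ">" is used as the heap's "less than" predicate.
bool CompareTaskPriority(int a, int b) { return a > b; }

bool CompareNamespacePriority(const TaskNamespace* a, const TaskNamespace* b) {
  // Key off each namespace's next task to run, i.e. the front of its heap.
  return CompareTaskPriority(a->ready_to_run_tasks.front(),
                             b->ready_to_run_tasks.front());
}

int main() {
  TaskNamespace ns1, ns2;
  ns1.ready_to_run_tasks = {5, 2};
  ns2.ready_to_run_tasks = {4};
  std::vector<TaskNamespace*> ready_namespaces = {&ns1, &ns2};

  for (TaskNamespace* ns : ready_namespaces)
    std::make_heap(ns->ready_to_run_tasks.begin(),
                   ns->ready_to_run_tasks.end(), CompareTaskPriority);
  std::make_heap(ready_namespaces.begin(), ready_namespaces.end(),
                 CompareNamespacePriority);

  while (!ready_namespaces.empty()) {
    // Take the namespace whose next task should run soonest.
    std::pop_heap(ready_namespaces.begin(), ready_namespaces.end(),
                  CompareNamespacePriority);
    TaskNamespace* ns = ready_namespaces.back();
    ready_namespaces.pop_back();

    // Take that namespace's top priority task.
    std::pop_heap(ns->ready_to_run_tasks.begin(),
                  ns->ready_to_run_tasks.end(), CompareTaskPriority);
    int task = ns->ready_to_run_tasks.back();
    ns->ready_to_run_tasks.pop_back();
    std::printf("running task %d\n", task);  // Prints 2, 4, 5 in order.

    // Put the namespace back only if it still has ready tasks.
    if (!ns->ready_to_run_tasks.empty()) {
      ready_namespaces.push_back(ns);
      std::push_heap(ready_namespaces.begin(), ready_namespaces.end(),
                     CompareNamespacePriority);
    }
  }
  return 0;
}
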
379 442
380 WorkerPool::WorkerPool(size_t num_threads, 443 class CC_EXPORT CompositorRasterTaskGraphRunner
381 const std::string& thread_name_prefix) 444 : public TaskGraphRunner {
382 : in_dispatch_completion_callbacks_(false), 445 public:
383 inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) { 446 CompositorRasterTaskGraphRunner() : TaskGraphRunner(
447 WorkerPool::GetNumRasterThreads(), "CompositorRaster") {
448 }
449 };
450
451 base::LazyInstance<CompositorRasterTaskGraphRunner>
452 g_task_graph_runner = LAZY_INSTANCE_INITIALIZER;
453
454 } // namespace
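The base::LazyInstance above is what makes the raster worker threads shared: every WorkerPool in the process registers with the same lazily constructed CompositorRasterTaskGraphRunner instead of owning its own Inner thread pool. A standard-C++ analogue of such a lazily initialized, process-wide instance (illustration only; the thread count of 4 and the helper names are made up here, and base::LazyInstance differs in details such as how destruction at process exit is handled):

#include <cstdio>
#include <string>

// Simplified stand-in for the shared TaskGraphRunner.
class TaskGraphRunner {
 public:
  TaskGraphRunner(size_t num_threads, const std::string& thread_name_prefix) {
    std::printf("starting %zu '%s' threads\n", num_threads,
                thread_name_prefix.c_str());
  }
  void Register(const void* /* worker_pool */) {}
  void Unregister(const void* /* worker_pool */) {}
};

// Lazily constructed on first use and shared by every caller in the process;
// C++11 guarantees the construction is thread safe. base::LazyInstance plays
// this role in the patch.
TaskGraphRunner* GetSharedTaskGraphRunner() {
  static TaskGraphRunner runner(4 /* hypothetical raster thread count */,
                                "CompositorRaster");
  return &runner;
}

class WorkerPool {
 public:
  WorkerPool() { GetSharedTaskGraphRunner()->Register(this); }
  ~WorkerPool() { GetSharedTaskGraphRunner()->Unregister(this); }
};

int main() {
  // Both pools register with the one shared runner; the runner's constructor
  // (and its thread startup) runs only once.
  WorkerPool pool_a;
  WorkerPool pool_b;
  return 0;
}
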
455
456 namespace internal {
457
458 WorkerPoolTask::WorkerPoolTask()
459 : did_schedule_(false),
460 did_run_(false),
461 did_complete_(false) {
462 }
463
464 WorkerPoolTask::~WorkerPoolTask() {
465 DCHECK_EQ(did_schedule_, did_complete_);
466 DCHECK(!did_run_ || did_schedule_);
467 DCHECK(!did_run_ || did_complete_);
468 }
469
470 void WorkerPoolTask::DidSchedule() {
471 DCHECK(!did_complete_);
472 did_schedule_ = true;
473 }
474
475 void WorkerPoolTask::WillRun() {
476 DCHECK(did_schedule_);
477 DCHECK(!did_complete_);
478 DCHECK(!did_run_);
479 }
480
481 void WorkerPoolTask::DidRun() {
482 did_run_ = true;
483 }
484
485 void WorkerPoolTask::WillComplete() {
486 DCHECK(!did_complete_);
487 }
488
489 void WorkerPoolTask::DidComplete() {
490 DCHECK(did_schedule_);
491 DCHECK(!did_complete_);
492 did_complete_ = true;
493 }
494
495 bool WorkerPoolTask::HasFinishedRunning() const {
496 return did_run_;
497 }
498
499 bool WorkerPoolTask::HasCompleted() const {
500 return did_complete_;
501 }
502
503 GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority)
504 : task_(task),
505 priority_(priority),
506 num_dependencies_(0) {
507 }
508
509 GraphNode::~GraphNode() {
510 }
511
512 } // namespace internal
513
514 WorkerPool::WorkerPool() : in_dispatch_completion_callbacks_(false) {
515 g_task_graph_runner.Pointer()->Register(this);
384 } 516 }
385 517
386 WorkerPool::~WorkerPool() { 518 WorkerPool::~WorkerPool() {
519 g_task_graph_runner.Pointer()->Unregister(this);
387 } 520 }
388 521
389 void WorkerPool::Shutdown() { 522 void WorkerPool::Shutdown() {
390 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); 523 TRACE_EVENT0("cc", "WorkerPool::Shutdown");
391 524
392 DCHECK(!in_dispatch_completion_callbacks_); 525 DCHECK(!in_dispatch_completion_callbacks_);
393 526
394 inner_->Shutdown(); 527 g_task_graph_runner.Pointer()->WaitForTasksToFinishRunning(this);
395 } 528 }
396 529
397 void WorkerPool::CheckForCompletedTasks() { 530 void WorkerPool::CheckForCompletedTasks() {
398 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks"); 531 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks");
399 532
400 DCHECK(!in_dispatch_completion_callbacks_); 533 DCHECK(!in_dispatch_completion_callbacks_);
401 534
402 TaskVector completed_tasks; 535 TaskVector completed_tasks;
403 inner_->CollectCompletedTasks(&completed_tasks); 536 g_task_graph_runner.Pointer()->CollectCompletedTasks(this, &completed_tasks);
404 ProcessCompletedTasks(completed_tasks); 537 ProcessCompletedTasks(completed_tasks);
405 } 538 }
406 539
407 void WorkerPool::ProcessCompletedTasks( 540 void WorkerPool::ProcessCompletedTasks(
408 const TaskVector& completed_tasks) { 541 const TaskVector& completed_tasks) {
409 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", 542 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks",
410 "completed_task_count", completed_tasks.size()); 543 "completed_task_count", completed_tasks.size());
411 544
412 // Worker pool instance is not reentrant while processing completed tasks. 545 // Worker pool instance is not reentrant while processing completed tasks.
413 in_dispatch_completion_callbacks_ = true; 546 in_dispatch_completion_callbacks_ = true;
(...skipping 10 matching lines...)
424 557
425 in_dispatch_completion_callbacks_ = false; 558 in_dispatch_completion_callbacks_ = false;
426 } 559 }
427 560
428 void WorkerPool::SetTaskGraph(TaskGraph* graph) { 561 void WorkerPool::SetTaskGraph(TaskGraph* graph) {
429 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", 562 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph",
430 "num_tasks", graph->size()); 563 "num_tasks", graph->size());
431 564
432 DCHECK(!in_dispatch_completion_callbacks_); 565 DCHECK(!in_dispatch_completion_callbacks_);
433 566
434 inner_->SetTaskGraph(graph); 567 g_task_graph_runner.Pointer()->SetTaskGraph(this, graph);
435 } 568 }
436 569
437 } // namespace cc 570 } // namespace cc