Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "cc/resources/worker_pool.h" | 5 #include "cc/resources/worker_pool.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <queue> | 8 #include <queue> |
| 9 | 9 |
| 10 #include "base/bind.h" | 10 #include "base/bind.h" |
| 11 #include "base/containers/hash_tables.h" | 11 #include "base/containers/hash_tables.h" |
| 12 #include "base/debug/trace_event.h" | 12 #include "base/debug/trace_event.h" |
| 13 #include "base/lazy_instance.h" | |
| 13 #include "base/strings/stringprintf.h" | 14 #include "base/strings/stringprintf.h" |
| 14 #include "base/synchronization/condition_variable.h" | 15 #include "base/synchronization/condition_variable.h" |
| 15 #include "base/threading/simple_thread.h" | 16 #include "base/threading/simple_thread.h" |
| 16 #include "base/threading/thread_restrictions.h" | 17 #include "base/threading/thread_restrictions.h" |
| 17 #include "cc/base/scoped_ptr_deque.h" | 18 #include "cc/base/scoped_ptr_deque.h" |
| 19 #include "cc/base/switches.h" | |
| 18 | 20 |
| 19 namespace cc { | 21 namespace cc { |
| 20 | 22 |
| 21 namespace internal { | 23 namespace internal { |
| 22 | 24 |
| 23 WorkerPoolTask::WorkerPoolTask() | 25 WorkerPoolTask::WorkerPoolTask() |
| 24 : did_schedule_(false), | 26 : did_schedule_(false), |
| 25 did_run_(false), | 27 did_run_(false), |
| 26 did_complete_(false) { | 28 did_complete_(false) { |
| 27 } | 29 } |
| (...skipping 49 matching lines...) | |
| 77 } // namespace internal | 79 } // namespace internal |
| 78 | 80 |
| 79 // Internal to the worker pool. Any data or logic that needs to be | 81 // Internal to the worker pool. Any data or logic that needs to be |
| 80 // shared between threads lives in this class. All members are guarded | 82 // shared between threads lives in this class. All members are guarded |
| 81 // by |lock_|. | 83 // by |lock_|. |
| 82 class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { | 84 class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
| 83 public: | 85 public: |
| 84 Inner(size_t num_threads, const std::string& thread_name_prefix); | 86 Inner(size_t num_threads, const std::string& thread_name_prefix); |
| 85 virtual ~Inner(); | 87 virtual ~Inner(); |
| 86 | 88 |
| 87 void Shutdown(); | 89 void Shutdown(WorkerPool* wp); |
|
reveman, 2013/11/21 16:36:10:
I don't think we should change Shutdown(). That's …
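
For context on this thread: the existing shutdown design sets a flag under the lock, signals a single worker, and relies on each exiting worker to wake the next (see Shutdown() and the end of Run() further down in the diff). Below is a minimal standalone sketch of that signal-chaining pattern, with std:: primitives standing in for base::Lock, base::ConditionVariable and base::DelegateSimpleThread; TinyPool and its members are illustrative names, not part of the patch.

```cpp
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class TinyPool {
 public:
  explicit TinyPool(size_t num_threads) {
    for (size_t i = 0; i < num_threads; ++i)
      workers_.emplace_back([this] { Run(); });
  }

  // Must be called before destruction; joins every worker.
  void Shutdown() {
    {
      std::lock_guard<std::mutex> lock(lock_);
      shutdown_ = true;
    }
    // Wake a single worker; each worker re-signals before exiting, so the
    // wake-up propagates through the whole pool.
    has_ready_to_run_tasks_cv_.notify_one();
    for (std::thread& worker : workers_)
      worker.join();
  }

 private:
  void Run() {
    std::unique_lock<std::mutex> lock(lock_);
    while (!shutdown_)
      has_ready_to_run_tasks_cv_.wait(lock);
    // Pass the signal on to the next sleeping worker before exiting.
    has_ready_to_run_tasks_cv_.notify_one();
  }

  std::mutex lock_;
  std::condition_variable has_ready_to_run_tasks_cv_;
  bool shutdown_ = false;
  std::vector<std::thread> workers_;
};
```

A single Signal() in Shutdown() is enough precisely because of that chain, which is what the comment in the real Shutdown() body points out.
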
| 88 | 90 |
| 89 // Schedule running of tasks in |graph|. Tasks previously scheduled but | 91 // Schedule running of tasks in |graph|. Tasks previously scheduled but |
| 90 // no longer needed will be canceled unless already running. Canceled | 92 // no longer needed will be canceled unless already running. Canceled |
| 91 // tasks are moved to |completed_tasks_| without being run. The result | 93 // tasks are moved to |completed_tasks_| without being run. The result |
| 92 // is that once scheduled, a task is guaranteed to end up in the | 94 // is that once scheduled, a task is guaranteed to end up in the |
| 93 // |completed_tasks_| queue even if it later gets canceled by another | 95 // |completed_tasks_| queue even if it later gets canceled by another |
| 94 // call to SetTaskGraph(). | 96 // call to SetTaskGraph(). |
| 95 void SetTaskGraph(TaskGraph* graph); | 97 |
| | 98 void SetTaskGraph(TaskGraph* graph, WorkerPool* wp); |
|
reveman, 2013/11/21 16:36:10:
Prefer if you pass WorkerPool pointer as first parameter…
| 96 | 99 |
| 97 // Collect all completed tasks in |completed_tasks|. | 100 // Collect all completed tasks in |completed_tasks|. |
| 98 void CollectCompletedTasks(TaskVector* completed_tasks); | 101 void CollectCompletedTasks(TaskVector* completed_tasks, WorkerPool* wp); |
|
reveman, 2013/11/21 16:36:10:
Here and above. Please don't abbreviate the parameter…
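
A side note on the PriorityComparator in the rows just below: std::priority_queue is a max-heap, so comparing with operator> makes the numerically lowest priority value come out first, which is what the in-code comment means by "numerically lower priority is run first". A tiny standalone illustration (not part of the patch):

```cpp
#include <cassert>
#include <functional>
#include <queue>
#include <vector>

int main() {
  // std::greater<int> plays the role of PriorityComparator here.
  std::priority_queue<int, std::vector<int>, std::greater<int> > queue;
  queue.push(2);
  queue.push(0);
  queue.push(1);
  assert(queue.top() == 0);  // the numerically lowest priority surfaces first
  return 0;
}
```
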
| 99 | 102 |
| 100 private: | 103 private: |
| 101 class PriorityComparator { | 104 class PriorityComparator { |
| 102 public: | 105 public: |
| 103 bool operator()(const internal::GraphNode* a, | 106 bool operator()(const internal::GraphNode* a, |
| 104 const internal::GraphNode* b) { | 107 const internal::GraphNode* b) { |
| 105 // In this system, numerically lower priority is run first. | 108 // In this system, numerically lower priority is run first. |
| 106 if (a->priority() != b->priority()) | 109 if (a->priority() != b->priority()) |
| 107 return a->priority() > b->priority(); | 110 return a->priority() > b->priority(); |
| 108 | 111 |
| (...skipping 15 matching lines...) | |
| 124 base::ConditionVariable has_ready_to_run_tasks_cv_; | 127 base::ConditionVariable has_ready_to_run_tasks_cv_; |
| 125 | 128 |
| 126 // Provides each running thread loop with a unique index. First thread | 129 // Provides each running thread loop with a unique index. First thread |
| 127 // loop index is 0. | 130 // loop index is 0. |
| 128 unsigned next_thread_index_; | 131 unsigned next_thread_index_; |
| 129 | 132 |
| 130 // Set during shutdown. Tells workers to exit when no more tasks | 133 // Set during shutdown. Tells workers to exit when no more tasks |
| 131 // are pending. | 134 // are pending. |
| 132 bool shutdown_; | 135 bool shutdown_; |
| 133 | 136 |
| 134 // This set contains all pending tasks. | |
| 135 GraphNodeMap pending_tasks_; | |
| 136 | 137 |
| 137 // Ordered set of tasks that are ready to run. | 138 // Ordered set of tasks that are ready to run. |
| 138 typedef std::priority_queue<internal::GraphNode*, | 139 typedef std::priority_queue<internal::GraphNode*, |
| 139 std::vector<internal::GraphNode*>, | 140 std::vector<internal::GraphNode*>, |
| 140 PriorityComparator> TaskQueue; | 141 PriorityComparator> TaskQueue; |
| 141 TaskQueue ready_to_run_tasks_; | 142 TaskQueue ready_to_run_tasks_; |
| 142 | 143 |
| 143 // This set contains all currently running tasks. | |
| 144 GraphNodeMap running_tasks_; | |
| 145 | |
| 146 // Completed tasks not yet collected by origin thread. | |
| 147 TaskVector completed_tasks_; | |
|
reveman, 2013/11/21 16:36:10:
I don't think we should move any of these out of …
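
For readers following this thread: the patch adds a "Maintain a WorkerPool instance per Inner for Task Namespace" member further down, and one hypothetical way to keep all of this state inside Inner, still guarded by a single lock, while serving several pools is a per-pool map. The sketch below is illustrative only; the struct and names are made up, and std:: containers stand in for GraphNodeMap/TaskVector. It is not what the patch implements.

```cpp
#include <map>
#include <mutex>
#include <vector>

class PoolHandle {};  // stands in for WorkerPool

struct TaskNamespace {
  std::vector<int> pending_tasks;    // placeholder for GraphNodeMap
  std::vector<int> running_tasks;    // placeholder for GraphNodeMap
  std::vector<int> completed_tasks;  // placeholder for TaskVector
};

class SharedInner {
 public:
  void CollectCompletedTasks(const PoolHandle* pool,
                             std::vector<int>* completed_tasks) {
    std::lock_guard<std::mutex> lock(lock_);
    completed_tasks->swap(namespaces_[pool].completed_tasks);
  }

 private:
  std::mutex lock_;
  // Guarded by |lock_|: one task namespace per pool, instead of a single
  // cached pool pointer.
  std::map<const PoolHandle*, TaskNamespace> namespaces_;
};
```
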
| 148 | 144 |
| 149 ScopedPtrDeque<base::DelegateSimpleThread> workers_; | 145 ScopedPtrDeque<base::DelegateSimpleThread> workers_; |
| 150 | 146 |
| | 147 // Maintain a WorkerPool instance per Inner for Task Namespace |
| | 148 WorkerPool* wp_; |
|
reveman, 2013/11/21 16:36:10:
If you keep a pointer to one WorkerPool instance …
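
Separately, for readers unfamiliar with the g_workerpool_inner declaration a few rows below: base::LazyInstance constructs its object on first access. The usual pattern from base/lazy_instance.h looks roughly like this; Registry and g_registry are hypothetical names used purely for illustration.

```cpp
#include "base/lazy_instance.h"

namespace {

class Registry {
 public:
  void Register() {}
};

// Constructed lazily, and thread-safely, on first access.
base::LazyInstance<Registry> g_registry = LAZY_INSTANCE_INITIALIZER;

}  // namespace

void UseRegistry() {
  g_registry.Pointer()->Register();  // or g_registry.Get().Register()
}
```
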
| | 149 |
| 151 DISALLOW_COPY_AND_ASSIGN(Inner); | 150 DISALLOW_COPY_AND_ASSIGN(Inner); |
| 152 }; | 151 }; |
| 153 | 152 |
| | 153 class CC_EXPORT DerivedInner : public WorkerPool::Inner { |
| | 154 public: |
| | 155 DerivedInner(); |
| | 156 }; |
| | 157 |
| | 158 base::LazyInstance<DerivedInner> g_workerpool_inner; |
| | 159 |
| 154 WorkerPool::Inner::Inner( | 160 WorkerPool::Inner::Inner( |
| 155 size_t num_threads, const std::string& thread_name_prefix) | 161 size_t num_threads, const std::string& thread_name_prefix) |
| 156 : lock_(), | 162 : lock_(), |
| 157 has_ready_to_run_tasks_cv_(&lock_), | 163 has_ready_to_run_tasks_cv_(&lock_), |
| 158 next_thread_index_(0), | 164 next_thread_index_(0), |
| 159 shutdown_(false) { | 165 shutdown_(false) { |
| 160 base::AutoLock lock(lock_); | 166 base::AutoLock lock(lock_); |
| 161 | |
| 162 while (workers_.size() < num_threads) { | 167 while (workers_.size() < num_threads) { |
| 163 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( | 168 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( |
| 164 new base::DelegateSimpleThread( | 169 new base::DelegateSimpleThread( |
| 165 this, | 170 this, |
| 166 thread_name_prefix + | 171 thread_name_prefix + |
| 167 base::StringPrintf( | 172 base::StringPrintf( |
| 168 "Worker%u", | 173 "Worker%u", |
| 169 static_cast<unsigned>(workers_.size() + 1)).c_str())); | 174 static_cast<unsigned>(workers_.size() + 1)).c_str())); |
| 170 worker->Start(); | 175 worker->Start(); |
| 171 #if defined(OS_ANDROID) || defined(OS_LINUX) | 176 #if defined(OS_ANDROID) || defined(OS_LINUX) |
| 172 worker->SetThreadPriority(base::kThreadPriority_Background); | 177 worker->SetThreadPriority(base::kThreadPriority_Background); |
| 173 #endif | 178 #endif |
| 174 workers_.push_back(worker.Pass()); | 179 workers_.push_back(worker.Pass()); |
| 175 } | 180 } |
| 176 } | 181 } |
| 177 | 182 |
| 178 WorkerPool::Inner::~Inner() { | 183 WorkerPool::Inner::~Inner() { |
| 179 base::AutoLock lock(lock_); | 184 base::AutoLock lock(lock_); |
| 180 | |
| 181 DCHECK(shutdown_); | 185 DCHECK(shutdown_); |
| 182 | 186 DCHECK_EQ(0u, wp_->pending_tasks_.size()); |
| 183 DCHECK_EQ(0u, pending_tasks_.size()); | |
| 184 DCHECK_EQ(0u, ready_to_run_tasks_.size()); | 187 DCHECK_EQ(0u, ready_to_run_tasks_.size()); |
| 185 DCHECK_EQ(0u, running_tasks_.size()); | 188 DCHECK_EQ(0u, wp_->running_tasks_.size()); |
| 186 DCHECK_EQ(0u, completed_tasks_.size()); | 189 DCHECK_EQ(0u, wp_->completed_tasks_.size()); |
| 187 } | 190 } |
| 188 | 191 |
| 189 void WorkerPool::Inner::Shutdown() { | 192 void WorkerPool::Inner::Shutdown(WorkerPool* wp) { |
| 190 { | 193 { |
| 191 base::AutoLock lock(lock_); | 194 base::AutoLock lock(lock_); |
| 192 | 195 |
| 193 DCHECK(!shutdown_); | 196 DCHECK(!shutdown_); |
| 194 shutdown_ = true; | 197 shutdown_ = true; |
| 195 | 198 |
| 196 // Wake up a worker so it knows it should exit. This will cause all workers | 199 // Wake up a worker so it knows it should exit. This will cause all workers |
| 197 // to exit as each will wake up another worker before exiting. | 200 // to exit as each will wake up another worker before exiting. |
| 198 has_ready_to_run_tasks_cv_.Signal(); | 201 has_ready_to_run_tasks_cv_.Signal(); |
| 199 } | 202 } |
| 200 | 203 |
| 201 while (workers_.size()) { | 204 while (workers_.size()) { |
| 202 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front(); | 205 scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front(); |
| 203 // http://crbug.com/240453 - Join() is considered IO and will block this | 206 // http://crbug.com/240453 - Join() is considered IO and will block this |
| 204 // thread. See also http://crbug.com/239423 for further ideas. | 207 // thread. See also http://crbug.com/239423 for further ideas. |
| 205 base::ThreadRestrictions::ScopedAllowIO allow_io; | 208 base::ThreadRestrictions::ScopedAllowIO allow_io; |
| 206 worker->Join(); | 209 worker->Join(); |
| 207 } | 210 } |
| 208 } | 211 } |
| 209 | 212 |
| 210 void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) { | 213 void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph, WorkerPool* wp) { |
| 211 // It is OK to call SetTaskGraph() after shutdown if |graph| is empty. | 214 // It is OK to call SetTaskGraph() after shutdown if |graph| is empty. |
| 212 DCHECK(graph->empty() || !shutdown_); | 215 DCHECK(graph->empty() || !shutdown_); |
| 213 | 216 |
| | 217 wp_ = wp; |
|
vivekg, 2013/11/22 04:44:50:
Wouldn't it be better to have a DCHECK(wp) to ensure …
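
As background for the loops that follow (and the matching code in Run()): the scheduler does dependency-count bookkeeping, where each graph node tracks how many unfinished dependencies it has and becomes ready to run once that count reaches zero. A small self-contained sketch of the idea, using illustrative names rather than the internal::GraphNode API:

```cpp
#include <cstddef>
#include <queue>
#include <vector>

struct Node {
  int priority = 0;
  std::size_t num_dependencies = 0;  // tasks that must finish before this one
  std::vector<Node*> dependents;     // tasks waiting on this one
};

// Called when |finished| has run (or was found already completed):
// release its dependents and queue the ones that became ready.
void OnTaskFinished(Node* finished, std::queue<Node*>* ready_to_run) {
  for (Node* dependent : finished->dependents) {
    if (--dependent->num_dependencies == 0)
      ready_to_run->push(dependent);
  }
}
```
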
| 214 GraphNodeMap new_pending_tasks; | 218 GraphNodeMap new_pending_tasks; |
| 215 GraphNodeMap new_running_tasks; | 219 GraphNodeMap new_running_tasks; |
| 216 TaskQueue new_ready_to_run_tasks; | 220 TaskQueue new_ready_to_run_tasks; |
| 217 | 221 |
| 218 new_pending_tasks.swap(*graph); | 222 new_pending_tasks.swap(*graph); |
| 219 | 223 |
| 220 { | 224 { |
| 221 base::AutoLock lock(lock_); | 225 base::AutoLock lock(lock_); |
| 222 | 226 |
| 223 // First remove all completed tasks from |new_pending_tasks| and | 227 // First remove all completed tasks from |new_pending_tasks| and |
| 224 // adjust number of dependencies. | 228 // adjust number of dependencies. |
| 225 for (TaskVector::iterator it = completed_tasks_.begin(); | 229 for (TaskVector::iterator it = wp_->completed_tasks_.begin(); |
| 226 it != completed_tasks_.end(); ++it) { | 230 it != wp_->completed_tasks_.end(); ++it) { |
| 227 internal::WorkerPoolTask* task = it->get(); | 231 internal::WorkerPoolTask* task = it->get(); |
| 228 | 232 |
| 229 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( | 233 scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase( |
| 230 task); | 234 task); |
| 231 if (node) { | 235 if (node) { |
| 232 for (internal::GraphNode::Vector::const_iterator it = | 236 for (internal::GraphNode::Vector::const_iterator it = |
| 233 node->dependents().begin(); | 237 node->dependents().begin(); |
| 234 it != node->dependents().end(); ++it) { | 238 it != node->dependents().end(); ++it) { |
| 235 internal::GraphNode* dependent_node = *it; | 239 internal::GraphNode* dependent_node = *it; |
| 236 dependent_node->remove_dependency(); | 240 dependent_node->remove_dependency(); |
| 237 } | 241 } |
| 238 } | 242 } |
| 239 } | 243 } |
| 240 | 244 |
| 241 // Build new running task set. | 245 // Build new running task set. |
| 242 for (GraphNodeMap::iterator it = running_tasks_.begin(); | 246 for (GraphNodeMap::iterator it = wp_->running_tasks_.begin(); |
| 243 it != running_tasks_.end(); ++it) { | 247 it != wp_->running_tasks_.end(); ++it) { |
| 244 internal::WorkerPoolTask* task = it->first; | 248 internal::WorkerPoolTask* task = it->first; |
| 245 // Transfer scheduled task value from |new_pending_tasks| to | 249 // Transfer scheduled task value from |new_pending_tasks| to |
| 246 // |new_running_tasks| if currently running. Value must be set to | 250 // |new_running_tasks| if currently running. Value must be set to |
| 247 // NULL if |new_pending_tasks| doesn't contain task. This does | 251 // NULL if |new_pending_tasks| doesn't contain task. This does |
| 248 // the right thing in both cases. | 252 // the right thing in both cases. |
| 249 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); | 253 new_running_tasks.set(task, new_pending_tasks.take_and_erase(task)); |
| 250 } | 254 } |
| 251 | 255 |
| 252 // Build new "ready to run" tasks queue. | 256 // Build new "ready to run" tasks queue. |
| 253 // TODO(reveman): Create this queue when building the task graph instead. | 257 // TODO(reveman): Create this queue when building the task graph instead. |
| 254 for (GraphNodeMap::iterator it = new_pending_tasks.begin(); | 258 for (GraphNodeMap::iterator it = new_pending_tasks.begin(); |
| 255 it != new_pending_tasks.end(); ++it) { | 259 it != new_pending_tasks.end(); ++it) { |
| 256 internal::WorkerPoolTask* task = it->first; | 260 internal::WorkerPoolTask* task = it->first; |
| 257 DCHECK(task); | 261 DCHECK(task); |
| 258 internal::GraphNode* node = it->second; | 262 internal::GraphNode* node = it->second; |
| 259 | 263 |
| 260 // Completed tasks should not exist in |new_pending_tasks|. | 264 // Completed tasks should not exist in |new_pending_tasks|. |
| 261 DCHECK(!task->HasFinishedRunning()); | 265 DCHECK(!task->HasFinishedRunning()); |
| 262 | 266 |
| 263 // Call DidSchedule() to indicate that this task has been scheduled. | 267 // Call DidSchedule() to indicate that this task has been scheduled. |
| 264 // Note: This is only for debugging purposes. | 268 // Note: This is only for debugging purposes. |
| 265 task->DidSchedule(); | 269 task->DidSchedule(); |
| 266 | 270 |
| 267 if (!node->num_dependencies()) | 271 if (!node->num_dependencies()) |
| 268 new_ready_to_run_tasks.push(node); | 272 new_ready_to_run_tasks.push(node); |
| 269 | 273 |
| 270 // Erase the task from old pending tasks. | 274 // Erase the task from old pending tasks. |
| 271 pending_tasks_.erase(task); | 275 wp_->pending_tasks_.erase(task); |
| 272 } | 276 } |
| 273 | 277 |
| 274 completed_tasks_.reserve(completed_tasks_.size() + pending_tasks_.size()); | 278 wp_->completed_tasks_.reserve( |
| | 279 wp_->completed_tasks_.size() + wp_->pending_tasks_.size()); |
| 275 | 280 |
| 276 // The items left in |pending_tasks_| need to be canceled. | 281 // The items left in |pending_tasks_| need to be canceled. |
| 277 for (GraphNodeMap::const_iterator it = pending_tasks_.begin(); | 282 for (GraphNodeMap::const_iterator it = wp_->pending_tasks_.begin(); |
| 278 it != pending_tasks_.end(); | 283 it != wp_->pending_tasks_.end(); |
| 279 ++it) { | 284 ++it) { |
| 280 completed_tasks_.push_back(it->first); | 285 wp_->completed_tasks_.push_back(it->first); |
| 281 } | 286 } |
| 282 | 287 |
| 283 // Swap task sets. | 288 // Swap task sets. |
| 284 // Note: old tasks are intentionally destroyed after releasing |lock_|. | 289 // Note: old tasks are intentionally destroyed after releasing |lock_|. |
| 285 pending_tasks_.swap(new_pending_tasks); | 290 wp_->pending_tasks_.swap(new_pending_tasks); |
| 286 running_tasks_.swap(new_running_tasks); | 291 wp_->running_tasks_.swap(new_running_tasks); |
| 287 std::swap(ready_to_run_tasks_, new_ready_to_run_tasks); | 292 std::swap(ready_to_run_tasks_, new_ready_to_run_tasks); |
| 288 | 293 |
| 289 // If |ready_to_run_tasks_| is empty, it means we either have | 294 // If |ready_to_run_tasks_| is empty, it means we either have |
| 290 // running tasks, or we have no pending tasks. | 295 // running tasks, or we have no pending tasks. |
| 291 DCHECK(!ready_to_run_tasks_.empty() || | 296 DCHECK(!ready_to_run_tasks_.empty() |
| 292 (pending_tasks_.empty() || !running_tasks_.empty())); | 297 ||(wp_->pending_tasks_.empty() || !wp_->running_tasks_.empty())); |
| 293 | 298 |
| 294 // If there is more work available, wake up worker thread. | 299 // If there is more work available, wake up worker thread. |
| 295 if (!ready_to_run_tasks_.empty()) | 300 if (!ready_to_run_tasks_.empty()) |
| 296 has_ready_to_run_tasks_cv_.Signal(); | 301 has_ready_to_run_tasks_cv_.Signal(); |
| 297 } | 302 } |
| 298 } | 303 } |
| 299 | 304 |
| 300 void WorkerPool::Inner::CollectCompletedTasks(TaskVector* completed_tasks) { | 305 void WorkerPool::Inner::CollectCompletedTasks( |
| | 306 TaskVector* completed_tasks, WorkerPool* wp) { |
| 301 base::AutoLock lock(lock_); | 307 base::AutoLock lock(lock_); |
| 302 | |
| 303 DCHECK_EQ(0u, completed_tasks->size()); | 308 DCHECK_EQ(0u, completed_tasks->size()); |
| 304 completed_tasks->swap(completed_tasks_); | 309 completed_tasks->swap(wp->completed_tasks_); |
| 305 } | 310 } |
| 306 | |
| 307 void WorkerPool::Inner::Run() { | 311 void WorkerPool::Inner::Run() { |
| 308 base::AutoLock lock(lock_); | 312 base::AutoLock lock(lock_); |
| 309 | 313 |
| 310 // Get a unique thread index. | 314 // Get a unique thread index. |
| 311 int thread_index = next_thread_index_++; | 315 int thread_index = next_thread_index_++; |
| 312 | 316 |
| 313 while (true) { | 317 while (true) { |
| 314 if (ready_to_run_tasks_.empty()) { | 318 if (ready_to_run_tasks_.empty()) { |
| 315 // Exit when shutdown is set and no more tasks are pending. | 319 // Exit when shutdown is set and no more tasks are pending. |
| 316 if (shutdown_ && pending_tasks_.empty()) | 320 if (shutdown_ && wp_->pending_tasks_.empty()) |
| 317 break; | 321 break; |
| 318 | 322 |
| 319 // Wait for more tasks. | 323 // Wait for more tasks. |
| 320 has_ready_to_run_tasks_cv_.Wait(); | 324 has_ready_to_run_tasks_cv_.Wait(); |
| 321 continue; | 325 continue; |
| 322 } | 326 } |
| 323 | 327 |
| 324 // Take top priority task from |ready_to_run_tasks_|. | 328 // Take top priority task from |ready_to_run_tasks_|. |
| 325 scoped_refptr<internal::WorkerPoolTask> task( | 329 scoped_refptr<internal::WorkerPoolTask> task( |
| 326 ready_to_run_tasks_.top()->task()); | 330 ready_to_run_tasks_.top()->task()); |
| 327 ready_to_run_tasks_.pop(); | 331 ready_to_run_tasks_.pop(); |
| 328 | 332 |
| 329 // Move task from |pending_tasks_| to |running_tasks_|. | 333 // Move task from |pending_tasks_| to |running_tasks_|. |
| 330 DCHECK(pending_tasks_.contains(task.get())); | 334 DCHECK(wp_->pending_tasks_.contains(task.get())); |
| 331 DCHECK(!running_tasks_.contains(task.get())); | 335 DCHECK(!wp_->running_tasks_.contains(task.get())); |
| 332 running_tasks_.set(task.get(), pending_tasks_.take_and_erase(task.get())); | 336 wp_->running_tasks_.set( |
| | 337 task.get(), wp_->pending_tasks_.take_and_erase(task.get())); |
| 333 | 338 |
| 334 // There may be more work available, so wake up another worker thread. | 339 // There may be more work available, so wake up another worker thread. |
| 335 has_ready_to_run_tasks_cv_.Signal(); | 340 has_ready_to_run_tasks_cv_.Signal(); |
| 336 | 341 |
| 337 // Call WillRun() before releasing |lock_| and running task. | 342 // Call WillRun() before releasing |lock_| and running task. |
| 338 task->WillRun(); | 343 task->WillRun(); |
| 339 | 344 |
| 340 { | 345 { |
| 341 base::AutoUnlock unlock(lock_); | 346 base::AutoUnlock unlock(lock_); |
| 342 | 347 |
| 343 task->RunOnWorkerThread(thread_index); | 348 task->RunOnWorkerThread(thread_index); |
| 344 } | 349 } |
| 345 | |
| 346 // This will mark task as finished running. | 350 // This will mark task as finished running. |
| 347 task->DidRun(); | 351 task->DidRun(); |
| 348 | 352 |
| 349 // Now iterate over all dependents to remove dependency and check | 353 // Now iterate over all dependents to remove dependency and check |
| 350 // if they are ready to run. | 354 // if they are ready to run. |
| 351 scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase( | 355 scoped_ptr<internal::GraphNode> node = wp_->running_tasks_.take_and_erase( |
| 352 task.get()); | 356 task.get()); |
| 353 if (node) { | 357 if (node) { |
| 354 for (internal::GraphNode::Vector::const_iterator it = | 358 for (internal::GraphNode::Vector::const_iterator it = |
| 355 node->dependents().begin(); | 359 node->dependents().begin(); |
| 356 it != node->dependents().end(); ++it) { | 360 it != node->dependents().end(); ++it) { |
| 357 internal::GraphNode* dependent_node = *it; | 361 internal::GraphNode* dependent_node = *it; |
| 358 | 362 |
| 359 dependent_node->remove_dependency(); | 363 dependent_node->remove_dependency(); |
| 360 // Task is ready if it has no dependencies. Add it to | 364 // Task is ready if it has no dependencies. Add it to |
| 361 // |ready_to_run_tasks_|. | 365 // |ready_to_run_tasks_|. |
| 362 if (!dependent_node->num_dependencies()) | 366 if (!dependent_node->num_dependencies()) |
| 363 ready_to_run_tasks_.push(dependent_node); | 367 ready_to_run_tasks_.push(dependent_node); |
| 364 } | 368 } |
| 365 } | 369 } |
| 366 | 370 |
| 367 // Finally add task to |completed_tasks_|. | 371 // Finally add task to |completed_tasks_|. |
| 368 completed_tasks_.push_back(task); | 372 wp_->completed_tasks_.push_back(task); |
| 369 } | 373 } |
| 370 | 374 |
| 371 // We noticed we should exit. Wake up the next worker so it knows it should | 375 // We noticed we should exit. Wake up the next worker so it knows it should |
| 372 // exit as well (because the Shutdown() code only signals once). | 376 // exit as well (because the Shutdown() code only signals once). |
| 373 has_ready_to_run_tasks_cv_.Signal(); | 377 has_ready_to_run_tasks_cv_.Signal(); |
| 374 } | 378 } |
| 375 | 379 |
| 376 WorkerPool::WorkerPool(size_t num_threads, | 380 WorkerPool::WorkerPool(size_t num_threads, |
| 377 const std::string& thread_name_prefix) | 381 const std::string& thread_name_prefix) |
| 378 : in_dispatch_completion_callbacks_(false), | 382 : in_dispatch_completion_callbacks_(false) { |
| 379 inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) { | |
| 380 } | 383 } |
| 381 | 384 |
| 382 WorkerPool::~WorkerPool() { | 385 WorkerPool::~WorkerPool() { |
| 383 } | 386 } |
| 384 | 387 |
| 385 void WorkerPool::Shutdown() { | 388 void WorkerPool::Shutdown() { |
| 386 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); | 389 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); |
| 387 | 390 |
| 388 DCHECK(!in_dispatch_completion_callbacks_); | 391 DCHECK(!in_dispatch_completion_callbacks_); |
| 389 | 392 g_workerpool_inner.Pointer()->Shutdown(this); |
| 390 inner_->Shutdown(); | |
| 391 } | 393 } |
| 392 | 394 |
| 393 void WorkerPool::CheckForCompletedTasks() { | 395 void WorkerPool::CheckForCompletedTasks() { |
| 394 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks"); | 396 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks"); |
| 395 | 397 |
| 396 DCHECK(!in_dispatch_completion_callbacks_); | 398 DCHECK(!in_dispatch_completion_callbacks_); |
| 397 | 399 |
| 398 TaskVector completed_tasks; | 400 TaskVector completed_tasks; |
| 399 inner_->CollectCompletedTasks(&completed_tasks); | 401 g_workerpool_inner.Pointer()->CollectCompletedTasks(&completed_tasks, this); |
| | 402 |
| 400 ProcessCompletedTasks(completed_tasks); | 403 ProcessCompletedTasks(completed_tasks); |
| 401 } | 404 } |
| 402 | 405 |
| 403 void WorkerPool::ProcessCompletedTasks( | 406 void WorkerPool::ProcessCompletedTasks( |
| 404 const TaskVector& completed_tasks) { | 407 const TaskVector& completed_tasks) { |
| 405 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", | 408 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", |
| 406 "completed_task_count", completed_tasks.size()); | 409 "completed_task_count", completed_tasks.size()); |
| 407 | 410 |
| 408 // Worker pool instance is not reentrant while processing completed tasks. | 411 // Worker pool instance is not reentrant while processing completed tasks. |
| 409 in_dispatch_completion_callbacks_ = true; | 412 in_dispatch_completion_callbacks_ = true; |
| 410 | 413 |
| 411 for (TaskVector::const_iterator it = completed_tasks.begin(); | 414 for (TaskVector::const_iterator it = completed_tasks.begin(); |
| 412 it != completed_tasks.end(); | 415 it != completed_tasks.end(); |
| 413 ++it) { | 416 ++it) { |
| 414 internal::WorkerPoolTask* task = it->get(); | 417 internal::WorkerPoolTask* task = it->get(); |
| 415 | 418 |
| 416 task->WillComplete(); | 419 task->WillComplete(); |
| 417 task->CompleteOnOriginThread(); | 420 task->CompleteOnOriginThread(); |
| 418 task->DidComplete(); | 421 task->DidComplete(); |
| 419 } | 422 } |
| 420 | 423 |
| 421 in_dispatch_completion_callbacks_ = false; | 424 in_dispatch_completion_callbacks_ = false; |
| 422 } | 425 } |
| 423 | 426 |
| 424 void WorkerPool::SetTaskGraph(TaskGraph* graph) { | 427 void WorkerPool::SetTaskGraph(TaskGraph* graph) { |
| 425 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", | 428 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", |
| 426 "num_tasks", graph->size()); | 429 "num_tasks", graph->size()); |
| 427 | 430 |
| 428 DCHECK(!in_dispatch_completion_callbacks_); | 431 DCHECK(!in_dispatch_completion_callbacks_); |
| | 432 |
| | 433 g_workerpool_inner.Pointer()->SetTaskGraph(graph, this); |
| | 434 } |
| 429 | 435 |
| 430 inner_->SetTaskGraph(graph); | 436 DerivedInner::DerivedInner() |
| | 437 : Inner((size_t)cc::switches::GetNumRasterThreads(), "CompositorRaster") { |
| 431 } | 438 } |
| 432 | 439 |
| 433 } // namespace cc | 440 } // namespace cc |
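
One last illustrative sketch, since it is easy to miss in Run() above: the worker releases the shared lock for the duration of RunOnWorkerThread() and re-acquires it to publish the result. Here std::mutex stands in for base::Lock, the explicit unlock()/lock() pair for the base::AutoUnlock scope, and FakeTask is a made-up type.

```cpp
#include <mutex>

struct FakeTask {
  void RunOnWorkerThread() { /* long-running work, no shared state touched */ }
};

std::mutex g_lock;  // stands in for |lock_|

void RunOneTask(FakeTask* task) {
  std::unique_lock<std::mutex> lock(g_lock);
  // ... pop |task| from the ready queue and move it to the running set ...
  lock.unlock();               // equivalent of the base::AutoUnlock scope
  task->RunOnWorkerThread();   // executes without holding the shared lock
  lock.lock();
  // ... move |task| to the completed list and wake dependents ...
}
```
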