Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "cc/resources/worker_pool.h" | 5 #include "cc/resources/worker_pool.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <queue> | 8 #include <queue> |
| 9 | 9 |
| 10 #include "base/bind.h" | 10 #include "base/bind.h" |
| 11 #include "base/containers/hash_tables.h" | 11 #include "base/containers/hash_tables.h" |
| 12 #include "base/debug/trace_event.h" | 12 #include "base/debug/trace_event.h" |
| 13 #include "base/lazy_instance.h" | |
| 13 #include "base/strings/stringprintf.h" | 14 #include "base/strings/stringprintf.h" |
| 14 #include "base/synchronization/condition_variable.h" | 15 #include "base/synchronization/condition_variable.h" |
| 15 #include "base/threading/simple_thread.h" | 16 #include "base/threading/simple_thread.h" |
| 16 #include "base/threading/thread_restrictions.h" | 17 #include "base/threading/thread_restrictions.h" |
| 17 #include "cc/base/scoped_ptr_deque.h" | 18 #include "cc/base/scoped_ptr_deque.h" |
| 18 | 19 |
| | 20 |
| 19 namespace cc { | 21 namespace cc { |
| 20 | 22 |
| 21 namespace internal { | 23 namespace internal { |
| 22 | 24 |
| 23 WorkerPoolTask::WorkerPoolTask() | 25 WorkerPoolTask::WorkerPoolTask() |
| 24 : did_schedule_(false), | 26 : did_schedule_(false), |
| 25 did_run_(false), | 27 did_run_(false), |
| 26 did_complete_(false) { | 28 did_complete_(false) { |
| 27 } | 29 } |
| 28 | 30 |
| (...skipping 45 matching lines...) | |
| 74 GraphNode::~GraphNode() { | 76 GraphNode::~GraphNode() { |
| 75 } | 77 } |
| 76 | 78 |
| 77 } // namespace internal | 79 } // namespace internal |
| 78 | 80 |
| 79 // Internal to the worker pool. Any data or logic that needs to be | 81 // Internal to the worker pool. Any data or logic that needs to be |
| 80 // shared between threads lives in this class. All members are guarded | 82 // shared between threads lives in this class. All members are guarded |
| 81 // by |lock_|. | 83 // by |lock_|. |
| 82 class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { | 84 class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { |
| 83 public: | 85 public: |
| | 86 // Required for Lazy Instantiation |
| | 87 Inner(); |
| 84 Inner(size_t num_threads, const std::string& thread_name_prefix); | 88 Inner(size_t num_threads, const std::string& thread_name_prefix); |

reveman (2013/11/15 17:21:41): One ctor please. How about we create a derived cl

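One way to read that suggestion, sketched below: keep Inner's single two-argument constructor and give base::LazyInstance a small derived type whose default constructor supplies process-wide defaults. The DefaultInner name and kDefaultNumRasterThreads constant are hypothetical, and this assumes Inner is nameable at namespace scope, as the patch's own g_workerpool_inner declaration already requires.

```cpp
// Sketch only, not part of the CL: a default-constructible wrapper lets
// base::LazyInstance build the shared instance while Inner keeps its one
// real constructor. Threads would start on first use, not via Start().
namespace {

const size_t kDefaultNumRasterThreads = 1;  // hypothetical default

class DefaultInner : public WorkerPool::Inner {
 public:
  DefaultInner()
      : WorkerPool::Inner(kDefaultNumRasterThreads, "CompositorRaster") {}
};

base::LazyInstance<DefaultInner> g_shared_inner = LAZY_INSTANCE_INITIALIZER;

}  // namespace
```
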
| 85 virtual ~Inner(); | 89 virtual ~Inner(); |
| 86 | 90 |
| 87 void Shutdown(); | 91 void Shutdown(); |
| 88 | 92 |
| 89 // Schedule running of tasks in |graph|. Tasks previously scheduled but | 93 // Schedule running of tasks in |graph|. Tasks previously scheduled but |
| 90 // no longer needed will be canceled unless already running. Canceled | 94 // no longer needed will be canceled unless already running. Canceled |
| 91 // tasks are moved to |completed_tasks_| without being run. The result | 95 // tasks are moved to |completed_tasks_| without being run. The result |
| 92 // is that once scheduled, a task is guaranteed to end up in the | 96 // is that once scheduled, a task is guaranteed to end up in the |
| 93 // |completed_tasks_| queue even if they later get canceled by another | 97 // |completed_tasks_| queue even if they later get canceled by another |
| 94 // call to SetTaskGraph(). | 98 // call to SetTaskGraph(). |
| 95 void SetTaskGraph(TaskGraph* graph); | 99 void SetTaskGraph(TaskGraph* graph); |
| 96 | 100 |
| 97 // Collect all completed tasks in |completed_tasks|. | 101 // Collect all completed tasks in |completed_tasks|. |
| 98 void CollectCompletedTasks(TaskVector* completed_tasks); | 102 void CollectCompletedTasks(TaskVector* completed_tasks); |
| 99 | 103 |
| | 104 // Required to be pulled out of Ctor, for lazy instantiation |
| | 105 void Start(); |
| | 106 // For setting number of threads from WorkerPool to WorkerPool::Inner |
| | 107 void SetNumThreads(size_t val) {num_threads_ = val;} |

reveman (2013/11/15 17:21:41): This is not thread safe. One LTHI might change the

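For reference, a minimal sketch of a lock-guarded setter, assuming Inner's existing |lock_| and |num_threads_| members. It only makes the write itself atomic; it does not resolve the race the comment describes, where two LayerTreeHostImpls set different values before Start() runs.

```cpp
// Sketch only: serializes the write under |lock_| but does not arbitrate
// between multiple callers wanting different thread counts.
void WorkerPool::Inner::SetNumThreads(size_t num_threads) {
  base::AutoLock lock(lock_);
  num_threads_ = num_threads;
}
```
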
| | 108 |
| | 109 |
| 100 private: | 110 private: |
| 101 class PriorityComparator { | 111 class PriorityComparator { |
| 102 public: | 112 public: |
| 103 bool operator()(const internal::GraphNode* a, | 113 bool operator()(const internal::GraphNode* a, |
| 104 const internal::GraphNode* b) { | 114 const internal::GraphNode* b) { |
| 105 // In this system, numerically lower priority is run first. | 115 // In this system, numerically lower priority is run first. |
| 106 if (a->priority() != b->priority()) | 116 if (a->priority() != b->priority()) |
| 107 return a->priority() > b->priority(); | 117 return a->priority() > b->priority(); |
| 108 | 118 |
| 109 // Run task with most dependents first when priority is the same. | 119 // Run task with most dependents first when priority is the same. |
| (...skipping 31 matching lines...) | |
| 141 TaskQueue ready_to_run_tasks_; | 151 TaskQueue ready_to_run_tasks_; |
| 142 | 152 |
| 143 // This set contains all currently running tasks. | 153 // This set contains all currently running tasks. |
| 144 GraphNodeMap running_tasks_; | 154 GraphNodeMap running_tasks_; |
| 145 | 155 |
| 146 // Completed tasks not yet collected by origin thread. | 156 // Completed tasks not yet collected by origin thread. |
| 147 TaskVector completed_tasks_; | 157 TaskVector completed_tasks_; |
| 148 | 158 |
| 149 ScopedPtrDeque<base::DelegateSimpleThread> workers_; | 159 ScopedPtrDeque<base::DelegateSimpleThread> workers_; |
| 150 | 160 |
| | 161 size_t num_threads_; |
| | 162 |
| 151 DISALLOW_COPY_AND_ASSIGN(Inner); | 163 DISALLOW_COPY_AND_ASSIGN(Inner); |
| 152 }; | 164 }; |
| 153 | 165 |
| | 166 // Lazy Instance of WorkerPool::Inner, |
| | 167 // This will enable sharing the worker thread across LTHI for same process |
| | 168 base::LazyInstance<WorkerPool::Inner> g_workerpool_inner; |
| | 169 |
| | 170 // Default Ctor required for Lazy Instantiation |
| | 171 WorkerPool::Inner::Inner() |
| | 172 : lock_(), |
| | 173 has_ready_to_run_tasks_cv_(&lock_), |
| | 174 next_thread_index_(0), |
| | 175 shutdown_(false) { |
| | 176 } |
| | 177 |
| 154 WorkerPool::Inner::Inner( | 178 WorkerPool::Inner::Inner( |
| 155 size_t num_threads, const std::string& thread_name_prefix) | 179 size_t num_threads, const std::string& thread_name_prefix) |
| 156 : lock_(), | 180 : lock_(), |
| 157 has_ready_to_run_tasks_cv_(&lock_), | 181 has_ready_to_run_tasks_cv_(&lock_), |
| 158 next_thread_index_(0), | 182 next_thread_index_(0), |
| 159 shutdown_(false) { | 183 shutdown_(false) { |
| 160 base::AutoLock lock(lock_); | 184 base::AutoLock lock(lock_); |
| 161 | |
| 162 while (workers_.size() < num_threads) { | 185 while (workers_.size() < num_threads) { |
| 163 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( | 186 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( |
| 164 new base::DelegateSimpleThread( | 187 new base::DelegateSimpleThread( |
| 165 this, | 188 this, |
| 166 thread_name_prefix + | 189 thread_name_prefix + |
| 167 base::StringPrintf( | 190 base::StringPrintf( |
| 168 "Worker%u", | 191 "Worker%u", |
| 169 static_cast<unsigned>(workers_.size() + 1)).c_str())); | 192 static_cast<unsigned>(workers_.size() + 1)).c_str())); |
| 170 worker->Start(); | 193 worker->Start(); |
| 171 #if defined(OS_ANDROID) || defined(OS_LINUX) | 194 #if defined(OS_ANDROID) || defined(OS_LINUX) |
| 172 worker->SetThreadPriority(base::kThreadPriority_Background); | 195 worker->SetThreadPriority(base::kThreadPriority_Background); |
| 173 #endif | 196 #endif |
| 174 workers_.push_back(worker.Pass()); | 197 workers_.push_back(worker.Pass()); |
| 175 } | 198 } |
| 176 } | 199 } |
| 177 | 200 |
| 178 WorkerPool::Inner::~Inner() { | 201 WorkerPool::Inner::~Inner() { |
| 179 base::AutoLock lock(lock_); | 202 base::AutoLock lock(lock_); |
| 180 | |
| 181 DCHECK(shutdown_); | 203 DCHECK(shutdown_); |
| 182 | 204 |
| 183 DCHECK_EQ(0u, pending_tasks_.size()); | 205 DCHECK_EQ(0u, pending_tasks_.size()); |
| 184 DCHECK_EQ(0u, ready_to_run_tasks_.size()); | 206 DCHECK_EQ(0u, ready_to_run_tasks_.size()); |
| 185 DCHECK_EQ(0u, running_tasks_.size()); | 207 DCHECK_EQ(0u, running_tasks_.size()); |
| 186 DCHECK_EQ(0u, completed_tasks_.size()); | 208 DCHECK_EQ(0u, completed_tasks_.size()); |
| 187 } | 209 } |
| 188 | 210 |
| | 211 // Required to be pulled out of Ctor, for lazy instantiation |
| | 212 void WorkerPool::Inner::Start() { |
| | 213 base::AutoLock lock(lock_); |
| | 214 |
| | 215 std::string thread_name_prefix = "CompositorRaster"; |
| | 216 size_t num_threads = num_threads_; |
| | 217 |
| | 218 |
| | 219 while (workers_.size() < num_threads) { |
| | 220 scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( |
| | 221 new base::DelegateSimpleThread( |
| | 222 this, |
| | 223 thread_name_prefix + |
| | 224 base::StringPrintf( |
| | 225 "Worker%u", |
| | 226 static_cast<unsigned>(workers_.size() + 1)).c_str())); |
| | 227 worker->Start(); |
| | 228 |
| | 229 #if defined(OS_ANDROID) || defined(OS_LINUX) |
| | 230 worker->SetThreadPriority(base::kThreadPriority_Background); |
| | 231 #endif |
| | 232 workers_.push_back(worker.Pass()); |
| | 233 } |
| | 234 } |
| | 235 |
| 189 void WorkerPool::Inner::Shutdown() { | 236 void WorkerPool::Inner::Shutdown() { |
| 190 { | 237 { |
| 191 base::AutoLock lock(lock_); | 238 base::AutoLock lock(lock_); |
| 192 | 239 |
| 193 DCHECK(!shutdown_); | 240 DCHECK(!shutdown_); |
| 194 shutdown_ = true; | 241 shutdown_ = true; |
| 195 | 242 |
| 196 // Wake up a worker so it knows it should exit. This will cause all workers | 243 // Wake up a worker so it knows it should exit. This will cause all workers |
| 197 // to exit as each will wake up another worker before exiting. | 244 // to exit as each will wake up another worker before exiting. |
| 198 has_ready_to_run_tasks_cv_.Signal(); | 245 has_ready_to_run_tasks_cv_.Signal(); |
| (...skipping 167 matching lines...) | |
| 366 | 413 |
| 367 // Finally add task to |completed_tasks_|. | 414 // Finally add task to |completed_tasks_|. |
| 368 completed_tasks_.push_back(task); | 415 completed_tasks_.push_back(task); |
| 369 } | 416 } |
| 370 | 417 |
| 371 // We noticed we should exit. Wake up the next worker so it knows it should | 418 // We noticed we should exit. Wake up the next worker so it knows it should |
| 372 // exit as well (because the Shutdown() code only signals once). | 419 // exit as well (because the Shutdown() code only signals once). |
| 373 has_ready_to_run_tasks_cv_.Signal(); | 420 has_ready_to_run_tasks_cv_.Signal(); |
| 374 } | 421 } |
| 375 | 422 |
| 376 WorkerPool::WorkerPool(size_t num_threads, | 423 WorkerPool::WorkerPool(size_t num_threads, |

reveman (2013/11/15 17:21:41): num_threads can't be a LTHI setting anymore as the

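A hypothetical direction, not something this CL does: with Inner shared process-wide, the thread count would stop being a per-WorkerPool constructor argument and become a process-wide default consumed by the shared instance. kNumRasterThreads below is a made-up name.

```cpp
// Sketch only: a process-wide default instead of a per-LayerTreeHostImpl
// constructor argument. |thread_name_prefix| is already unused by the
// shared Inner in this patch (Start() hard-codes "CompositorRaster").
namespace {
const size_t kNumRasterThreads = 1;  // hypothetical
}  // namespace

WorkerPool::WorkerPool(const std::string& thread_name_prefix)
    : in_dispatch_completion_callbacks_(false) {
  g_workerpool_inner.Pointer()->SetNumThreads(kNumRasterThreads);
  g_workerpool_inner.Pointer()->Start();
}
```
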
| 377 const std::string& thread_name_prefix) | 424 const std::string& thread_name_prefix) |
| 378 : in_dispatch_completion_callbacks_(false), | 425 : in_dispatch_completion_callbacks_(false) { |
| 379 inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) { | 426 g_workerpool_inner.Pointer()->SetNumThreads(num_threads); |
| 427 g_workerpool_inner.Pointer()->Start(); | |

reveman (2013/11/15 17:21:41): What's stopping Start() from being called multiple

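One simple answer would be an idempotence guard, sketched here under the assumption that a non-zero |workers_.size()| means the threads already exist; a second WorkerPool constructed in the same process would then make Start() a no-op.

```cpp
// Sketch only: bail out if another WorkerPool already started the threads.
void WorkerPool::Inner::Start() {
  base::AutoLock lock(lock_);

  if (workers_.size() > 0)
    return;  // Already started; the shared threads are reused.

  // ... thread-creation loop exactly as in the patch ...
}
```
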
| 380 } | 428 } |
| 381 | 429 |
| 382 WorkerPool::~WorkerPool() { | 430 WorkerPool::~WorkerPool() { |
| 383 } | 431 } |
| 384 | 432 |
| 385 void WorkerPool::Shutdown() { | 433 void WorkerPool::Shutdown() { |
| 386 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); | 434 TRACE_EVENT0("cc", "WorkerPool::Shutdown"); |
| 387 | 435 |
| 388 DCHECK(!in_dispatch_completion_callbacks_); | 436 DCHECK(!in_dispatch_completion_callbacks_); |
| 389 | 437 g_workerpool_inner.Pointer()->Shutdown(); |

reveman (2013/11/15 17:21:41): You need to introduce a task namespace for each Wo

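Roughly what a task namespace could look like (every name here is an assumption, not patch code): each WorkerPool passes a token identifying itself, the shared Inner keys its bookkeeping on that token, and Shutdown(), SetTaskGraph() and CollectCompletedTasks() then operate only on the caller's slice, e.g.:

```cpp
// Sketch only: per-WorkerPool scoping inside the shared Inner.
typedef const WorkerPool* TaskNamespace;

// Inner would hold, guarded by |lock_|, something like:
//   base::hash_map<TaskNamespace, TaskVector> completed_tasks_;
// and each entry point would take the caller's namespace:

void WorkerPool::Inner::CollectCompletedTasks(TaskNamespace task_namespace,
                                              TaskVector* completed_tasks) {
  base::AutoLock lock(lock_);
  completed_tasks->swap(completed_tasks_[task_namespace]);
  completed_tasks_.erase(task_namespace);
}
```
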
| 390 inner_->Shutdown(); | |
| 391 } | 438 } |
| 392 | 439 |
| 393 void WorkerPool::CheckForCompletedTasks() { | 440 void WorkerPool::CheckForCompletedTasks() { |
| 394 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks"); | 441 TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks"); |
| 395 | 442 |
| 396 DCHECK(!in_dispatch_completion_callbacks_); | 443 DCHECK(!in_dispatch_completion_callbacks_); |
| 397 | 444 |
| 398 TaskVector completed_tasks; | 445 TaskVector completed_tasks; |
| 399 inner_->CollectCompletedTasks(&completed_tasks); | 446 g_workerpool_inner.Pointer()->CollectCompletedTasks(&completed_tasks); |

reveman (2013/11/15 17:21:41): Same here. Task namespace needed for this to work.

| 400 ProcessCompletedTasks(completed_tasks); | 447 ProcessCompletedTasks(completed_tasks); |
| 401 } | 448 } |
| 402 | 449 |
| 403 void WorkerPool::ProcessCompletedTasks( | 450 void WorkerPool::ProcessCompletedTasks( |
| 404 const TaskVector& completed_tasks) { | 451 const TaskVector& completed_tasks) { |
| 405 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", | 452 TRACE_EVENT1("cc", "WorkerPool::ProcessCompletedTasks", |
| 406 "completed_task_count", completed_tasks.size()); | 453 "completed_task_count", completed_tasks.size()); |
| 407 | 454 |
| 408 // Worker pool instance is not reentrant while processing completed tasks. | 455 // Worker pool instance is not reentrant while processing completed tasks. |
| 409 in_dispatch_completion_callbacks_ = true; | 456 in_dispatch_completion_callbacks_ = true; |
| 410 | 457 |
| 411 for (TaskVector::const_iterator it = completed_tasks.begin(); | 458 for (TaskVector::const_iterator it = completed_tasks.begin(); |
| 412 it != completed_tasks.end(); | 459 it != completed_tasks.end(); |
| 413 ++it) { | 460 ++it) { |
| 414 internal::WorkerPoolTask* task = it->get(); | 461 internal::WorkerPoolTask* task = it->get(); |
| 415 | 462 |
| 416 task->WillComplete(); | 463 task->WillComplete(); |
| 417 task->CompleteOnOriginThread(); | 464 task->CompleteOnOriginThread(); |
| 418 task->DidComplete(); | 465 task->DidComplete(); |
| 419 } | 466 } |
| 420 | 467 |
| 421 in_dispatch_completion_callbacks_ = false; | 468 in_dispatch_completion_callbacks_ = false; |
| 422 } | 469 } |
| 423 | 470 |
| 424 void WorkerPool::SetTaskGraph(TaskGraph* graph) { | 471 void WorkerPool::SetTaskGraph(TaskGraph* graph) { |
| 425 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", | 472 TRACE_EVENT1("cc", "WorkerPool::SetTaskGraph", |
| 426 "num_tasks", graph->size()); | 473 "num_tasks", graph->size()); |
| 427 | 474 |
| 428 DCHECK(!in_dispatch_completion_callbacks_); | 475 DCHECK(!in_dispatch_completion_callbacks_); |
| 429 | 476 g_workerpool_inner.Pointer()->SetTaskGraph(graph); |

reveman (2013/11/15 17:21:41): And here. Task namespace needed for this to work.

| 430 inner_->SetTaskGraph(graph); | 477 } |
| 431 } | |
| 432 | 478 |
| 433 } // namespace cc | 479 } // namespace cc |