Chromium Code Reviews

Unified Diff: cc/resources/worker_pool.cc

Issue 73923003: Shared Raster Worker Threads (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Task namespace for WorkerPool Created 7 years, 1 month ago
Index: cc/resources/worker_pool.cc
diff --git a/cc/resources/worker_pool.cc b/cc/resources/worker_pool.cc
old mode 100644
new mode 100755
index dca0c704f09dbc2559bf3f4adf860724ce9a6240..c72a278be78c462dc195c88d20034c18f80c2dd7
--- a/cc/resources/worker_pool.cc
+++ b/cc/resources/worker_pool.cc
@@ -10,11 +10,13 @@
#include "base/bind.h"
#include "base/containers/hash_tables.h"
#include "base/debug/trace_event.h"
+#include "base/lazy_instance.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/condition_variable.h"
#include "base/threading/simple_thread.h"
#include "base/threading/thread_restrictions.h"
#include "cc/base/scoped_ptr_deque.h"
+#include "cc/base/switches.h"
namespace cc {
@@ -84,7 +86,7 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
Inner(size_t num_threads, const std::string& thread_name_prefix);
virtual ~Inner();
- void Shutdown();
+ void Shutdown(WorkerPool* wp);
reveman 2013/11/21 16:36:10 I don't think we should change Shutdown(). That's
// Schedule running of tasks in |graph|. Tasks previously scheduled but
// no longer needed will be canceled unless already running. Canceled
@@ -92,10 +94,11 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
// is that once scheduled, a task is guaranteed to end up in the
// |completed_tasks_| queue even if they later get canceled by another
// call to SetTaskGraph().
- void SetTaskGraph(TaskGraph* graph);
+
+ void SetTaskGraph(TaskGraph* graph, WorkerPool* wp);
reveman 2013/11/21 16:36:10 Prefer if you pass WorkerPool pointer as first par
// Collect all completed tasks in |completed_tasks|.
- void CollectCompletedTasks(TaskVector* completed_tasks);
+ void CollectCompletedTasks(TaskVector* completed_tasks, WorkerPool* wp);
reveman 2013/11/21 16:36:10 Here and above. Please don't abbreviate the parame
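A sketch (not part of the patch or the thread) of how the two comments above could look once applied, with the WorkerPool pointer passed first and the parameter name spelled out rather than abbreviated:

  // Sketch only: WorkerPool pointer as the first parameter, no abbreviation.
  void SetTaskGraph(WorkerPool* worker_pool, TaskGraph* graph);
  void CollectCompletedTasks(WorkerPool* worker_pool,
                             TaskVector* completed_tasks);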
private:
class PriorityComparator {
@@ -131,8 +134,6 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
// are pending.
bool shutdown_;
- // This set contains all pending tasks.
- GraphNodeMap pending_tasks_;
// Ordered set of tasks that are ready to run.
typedef std::priority_queue<internal::GraphNode*,
@@ -140,17 +141,22 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
PriorityComparator> TaskQueue;
TaskQueue ready_to_run_tasks_;
- // This set contains all currently running tasks.
- GraphNodeMap running_tasks_;
-
- // Completed tasks not yet collected by origin thread.
- TaskVector completed_tasks_;
reveman 2013/11/21 16:36:10 I don't think we should move any of these out of t
ScopedPtrDeque<base::DelegateSimpleThread> workers_;
+ // Maintain a WorkerPool instance per Inner for Task Namespace
+ WorkerPool* wp_;
reveman 2013/11/21 16:36:10 If you keep a pointer to one WorkerPool instance t
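The comment above is cut off, so the following is only one possible reading of the concern: when several WorkerPool instances share one Inner, a single cached |wp_| can only track whichever pool called in last. A purely illustrative sketch keeps per-pool task state keyed by the calling WorkerPool instead; TaskNamespace is a hypothetical helper, not something in the patch:

  // Illustrative only: per-WorkerPool ("task namespace") state instead of a
  // single cached pointer. TaskNamespace is a hypothetical helper struct.
  struct TaskNamespace {
    GraphNodeMap pending_tasks;
    GraphNodeMap running_tasks;
    TaskVector completed_tasks;
  };
  // (Would need #include <map>.)
  typedef std::map<WorkerPool*, TaskNamespace*> TaskNamespaceMap;
  TaskNamespaceMap task_namespaces_;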
+
DISALLOW_COPY_AND_ASSIGN(Inner);
};
+class CC_EXPORT DerivedInner : public WorkerPool::Inner {
+ public:
+ DerivedInner();
+};
+
+base::LazyInstance<DerivedInner> g_workerpool_inner;
+
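As an aside, not raised in the thread: Chromium's file-scope base::LazyInstance globals are conventionally declared with the LAZY_INSTANCE_INITIALIZER macro so they get constant initialization (and are often made leaky for process-lifetime singletons). A sketch of that spelling:

  // Sketch only: conventional LazyInstance declaration with constant
  // initialization. Whether a leaky instance is appropriate here would be a
  // separate question for the review.
  base::LazyInstance<DerivedInner> g_workerpool_inner =
      LAZY_INSTANCE_INITIALIZER;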
WorkerPool::Inner::Inner(
size_t num_threads, const std::string& thread_name_prefix)
: lock_(),
@@ -158,7 +164,6 @@ WorkerPool::Inner::Inner(
next_thread_index_(0),
shutdown_(false) {
base::AutoLock lock(lock_);
-
while (workers_.size() < num_threads) {
scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr(
new base::DelegateSimpleThread(
@@ -177,16 +182,14 @@ WorkerPool::Inner::Inner(
WorkerPool::Inner::~Inner() {
base::AutoLock lock(lock_);
-
DCHECK(shutdown_);
-
- DCHECK_EQ(0u, pending_tasks_.size());
+ DCHECK_EQ(0u, wp_->pending_tasks_.size());
DCHECK_EQ(0u, ready_to_run_tasks_.size());
- DCHECK_EQ(0u, running_tasks_.size());
- DCHECK_EQ(0u, completed_tasks_.size());
+ DCHECK_EQ(0u, wp_->running_tasks_.size());
+ DCHECK_EQ(0u, wp_->completed_tasks_.size());
}
-void WorkerPool::Inner::Shutdown() {
+void WorkerPool::Inner::Shutdown(WorkerPool* wp) {
{
base::AutoLock lock(lock_);
@@ -207,10 +210,11 @@ void WorkerPool::Inner::Shutdown() {
}
}
-void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) {
+void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph, WorkerPool* wp) {
// It is OK to call SetTaskGraph() after shutdown if |graph| is empty.
DCHECK(graph->empty() || !shutdown_);
+ wp_ = wp;
vivekg 2013/11/22 04:44:50 Wouldn't it be better to have a DCHECK(wp) to ensu
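A minimal sketch of the DCHECK asked about above (the comment is truncated, so the exact intent is assumed):

  // Sketch only: fail fast on a null WorkerPool before caching it.
  DCHECK(wp);
  wp_ = wp;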
GraphNodeMap new_pending_tasks;
GraphNodeMap new_running_tasks;
TaskQueue new_ready_to_run_tasks;
@@ -222,8 +226,8 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) {
// First remove all completed tasks from |new_pending_tasks| and
// adjust number of dependencies.
- for (TaskVector::iterator it = completed_tasks_.begin();
- it != completed_tasks_.end(); ++it) {
+ for (TaskVector::iterator it = wp_->completed_tasks_.begin();
+ it != wp_->completed_tasks_.end(); ++it) {
internal::WorkerPoolTask* task = it->get();
scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase(
@@ -239,8 +243,8 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) {
}
// Build new running task set.
- for (GraphNodeMap::iterator it = running_tasks_.begin();
- it != running_tasks_.end(); ++it) {
+ for (GraphNodeMap::iterator it = wp_->running_tasks_.begin();
+ it != wp_->running_tasks_.end(); ++it) {
internal::WorkerPoolTask* task = it->first;
// Transfer scheduled task value from |new_pending_tasks| to
// |new_running_tasks| if currently running. Value must be set to
@@ -268,28 +272,29 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) {
new_ready_to_run_tasks.push(node);
// Erase the task from old pending tasks.
- pending_tasks_.erase(task);
+ wp_->pending_tasks_.erase(task);
}
- completed_tasks_.reserve(completed_tasks_.size() + pending_tasks_.size());
+ wp_->completed_tasks_.reserve(
+ wp_->completed_tasks_.size() + wp_->pending_tasks_.size());
// The items left in |pending_tasks_| need to be canceled.
- for (GraphNodeMap::const_iterator it = pending_tasks_.begin();
- it != pending_tasks_.end();
+ for (GraphNodeMap::const_iterator it = wp_->pending_tasks_.begin();
+ it != wp_->pending_tasks_.end();
++it) {
- completed_tasks_.push_back(it->first);
+ wp_->completed_tasks_.push_back(it->first);
}
// Swap task sets.
// Note: old tasks are intentionally destroyed after releasing |lock_|.
- pending_tasks_.swap(new_pending_tasks);
- running_tasks_.swap(new_running_tasks);
+ wp_->pending_tasks_.swap(new_pending_tasks);
+ wp_->running_tasks_.swap(new_running_tasks);
std::swap(ready_to_run_tasks_, new_ready_to_run_tasks);
// If |ready_to_run_tasks_| is empty, it means we either have
// running tasks, or we have no pending tasks.
- DCHECK(!ready_to_run_tasks_.empty() ||
- (pending_tasks_.empty() || !running_tasks_.empty()));
+ DCHECK(!ready_to_run_tasks_.empty()
+ ||(wp_->pending_tasks_.empty() || !wp_->running_tasks_.empty()));
// If there is more work available, wake up worker thread.
if (!ready_to_run_tasks_.empty())
@@ -297,23 +302,22 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) {
}
}
-void WorkerPool::Inner::CollectCompletedTasks(TaskVector* completed_tasks) {
+void WorkerPool::Inner::CollectCompletedTasks(
+ TaskVector* completed_tasks, WorkerPool* wp) {
base::AutoLock lock(lock_);
-
DCHECK_EQ(0u, completed_tasks->size());
- completed_tasks->swap(completed_tasks_);
+ completed_tasks->swap(wp->completed_tasks_);
}
-
void WorkerPool::Inner::Run() {
base::AutoLock lock(lock_);
// Get a unique thread index.
int thread_index = next_thread_index_++;
-
+
while (true) {
if (ready_to_run_tasks_.empty()) {
// Exit when shutdown is set and no more tasks are pending.
- if (shutdown_ && pending_tasks_.empty())
+ if (shutdown_ && wp_->pending_tasks_.empty())
break;
// Wait for more tasks.
@@ -327,9 +331,10 @@ void WorkerPool::Inner::Run() {
ready_to_run_tasks_.pop();
// Move task from |pending_tasks_| to |running_tasks_|.
- DCHECK(pending_tasks_.contains(task.get()));
- DCHECK(!running_tasks_.contains(task.get()));
- running_tasks_.set(task.get(), pending_tasks_.take_and_erase(task.get()));
+ DCHECK(wp_->pending_tasks_.contains(task.get()));
+ DCHECK(!wp_->running_tasks_.contains(task.get()));
+ wp_->running_tasks_.set(
+ task.get(), wp_->pending_tasks_.take_and_erase(task.get()));
// There may be more work available, so wake up another worker thread.
has_ready_to_run_tasks_cv_.Signal();
@@ -342,13 +347,12 @@ void WorkerPool::Inner::Run() {
task->RunOnWorkerThread(thread_index);
}
-
// This will mark task as finished running.
task->DidRun();
// Now iterate over all dependents to remove dependency and check
// if they are ready to run.
- scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase(
+ scoped_ptr<internal::GraphNode> node = wp_->running_tasks_.take_and_erase(
task.get());
if (node) {
for (internal::GraphNode::Vector::const_iterator it =
@@ -365,7 +369,7 @@ void WorkerPool::Inner::Run() {
}
// Finally add task to |completed_tasks_|.
- completed_tasks_.push_back(task);
+ wp_->completed_tasks_.push_back(task);
}
// We noticed we should exit. Wake up the next worker so it knows it should
@@ -375,8 +379,7 @@ void WorkerPool::Inner::Run() {
WorkerPool::WorkerPool(size_t num_threads,
const std::string& thread_name_prefix)
- : in_dispatch_completion_callbacks_(false),
- inner_(make_scoped_ptr(new Inner(num_threads, thread_name_prefix))) {
+ : in_dispatch_completion_callbacks_(false) {
}
WorkerPool::~WorkerPool() {
@@ -386,8 +389,7 @@ void WorkerPool::Shutdown() {
TRACE_EVENT0("cc", "WorkerPool::Shutdown");
DCHECK(!in_dispatch_completion_callbacks_);
-
- inner_->Shutdown();
+ g_workerpool_inner.Pointer()->Shutdown(this);
}
void WorkerPool::CheckForCompletedTasks() {
@@ -396,7 +398,8 @@ void WorkerPool::CheckForCompletedTasks() {
DCHECK(!in_dispatch_completion_callbacks_);
TaskVector completed_tasks;
- inner_->CollectCompletedTasks(&completed_tasks);
+ g_workerpool_inner.Pointer()->CollectCompletedTasks(&completed_tasks, this);
+
ProcessCompletedTasks(completed_tasks);
}
@@ -426,8 +429,12 @@ void WorkerPool::SetTaskGraph(TaskGraph* graph) {
"num_tasks", graph->size());
DCHECK(!in_dispatch_completion_callbacks_);
+
+ g_workerpool_inner.Pointer()->SetTaskGraph(graph, this);
+}
- inner_->SetTaskGraph(graph);
+DerivedInner::DerivedInner()
+ : Inner((size_t)cc::switches::GetNumRasterThreads(), "CompositorRaster") {
}
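As an aside, not raised in the thread: Chromium style generally prefers static_cast to C-style casts, so the constructor above could also be spelled as in this sketch:

  // Sketch only: static_cast instead of the C-style (size_t) cast.
  DerivedInner::DerivedInner()
      : Inner(static_cast<size_t>(cc::switches::GetNumRasterThreads()),
              "CompositorRaster") {}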
} // namespace cc
