Index: content/common/sequenced_worker_pool.cc |
=================================================================== |
--- content/common/sequenced_worker_pool.cc (revision 0) |
+++ content/common/sequenced_worker_pool.cc (revision 0) |
@@ -0,0 +1,393 @@ |
+// Copyright (c) 2011 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "content/common/sequenced_worker_pool.h" |
+ |
+#include <deque> |
+#include <list> |
+#include <set> |
+#include <vector> |
+ |
+#include "base/atomicops.h" |
+#include "base/bind.h" |
+#include "base/memory/linked_ptr.h" |
+#include "base/memory/scoped_ptr.h" |
+#include "base/metrics/histogram.h" |
+#include "base/stringprintf.h" |
+#include "base/synchronization/lock.h" |
+#include "base/synchronization/waitable_event.h" |
+#include "base/threading/thread.h" |
+ |
+namespace { |
+ |
+struct SequencedTask { |
+ int sequence_token_id; |
+ SequencedWorkerPool::WorkerShutdown shutdown_behavior; |
+ tracked_objects::Location location; |
+ base::Closure task; |
+}; |
+ |
+class Worker { |
+ public: |
+  explicit Worker(int thread_number) |
+ : thread_(StringPrintf("Browser worker %d", thread_number).c_str()), |
+ current_shutdown_mode_(SequencedWorkerPool::CONTINUE_ON_SHUTDOWN) { |
+ thread_.Start(); |
+ } |
+ ~Worker() { |
+ } |
+ |
+ // Posts a task to the worker's message loop for running. The actual task |
+ // to run should be Inner's RunTask. The SequencedTask is passed in only |
+  // so its source location and shutdown behavior can be recorded. |
+ // |
+ // This should only be called from within Inner's lock. |
jar (doing other things)
2011/11/23 20:08:16
It is surprising to require a lock (when posting a
|
+ void PostTask(const SequencedTask& sequenced_info, |
+ const base::Closure& task) { |
+ // Use the original task birthplace as the source for this call so we can |
+ // trace back who made us do this work. |
+ thread_.message_loop()->PostTask(sequenced_info.location, task); |
jar (doing other things)
2011/11/23 20:08:16
It is more surprising to see a local lock held as
|
+ current_shutdown_mode_ = sequenced_info.shutdown_behavior; |
+ } |
+ |
+ // Cleans up after a task is complete. This should be called from within |
+ // Inner's lock as soon as the task is complete. |
+ void WorkComplete() { |
+ current_shutdown_mode_ = SequencedWorkerPool::CONTINUE_ON_SHUTDOWN; |
+ } |
+ |
+ // When running a task, the shutdown mode is stored on the worker. It will |
+  // be CONTINUE_ON_SHUTDOWN if there is no running task. |
+ // |
+ // This must only be called when holding the Inner class' lock. |
+  SequencedWorkerPool::WorkerShutdown current_shutdown_mode() const { |
+ return current_shutdown_mode_; |
+ } |
+ |
+ private: |
+ base::Thread thread_; |
+ |
+ SequencedWorkerPool::WorkerShutdown current_shutdown_mode_; |
+ |
+ DISALLOW_COPY_AND_ASSIGN(Worker); |
+}; |
+ |
+} // namespace |
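The Worker comments above describe a strict protocol: Inner calls PostTask() and WorkComplete() only while holding its own lock, and the shutdown mode snaps back to CONTINUE_ON_SHUTDOWN once a task finishes. A minimal sketch of that protocol; the helper and parameter names here are invented and are not part of the patch:

    // Illustrative only: how Inner is expected to drive a Worker.
    void HandTaskToWorker(Worker* worker, const SequencedTask& sequenced,
                          const base::Closure& bound_run_task,
                          base::Lock* inner_lock) {
      base::AutoLock hold(*inner_lock);             // Inner's lock
      worker->PostTask(sequenced, bound_run_task);  // records shutdown mode
    }

    void MarkWorkerIdle(Worker* worker, base::Lock* inner_lock) {
      base::AutoLock hold(*inner_lock);
      worker->WorkComplete();  // back to CONTINUE_ON_SHUTDOWN
    }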
+ |
+// Inner ---------------------------------------------------------------------- |
+ |
+class SequencedWorkerPool::Inner |
jar (doing other things)
2011/11/23 20:08:16
Suggest you add a comment to clarify that this is
|
+ : public base::RefCountedThreadSafe<SequencedWorkerPool::Inner> { |
+ public: |
+ Inner(size_t max_threads); |
+ |
+  // Returns a unique token that can be used to sequence tasks posted via |
+  // PostSequencedWorkerTask(). Valid tokens are always nonzero. |
+ SequenceToken GetSequenceToken(); |
+ |
+ // Posts a task. See PostSequencedWorkerTask. The token ID will be 0 for |
+ // unsequenced tasks. |
+ bool PostTask(int sequence_token_id, |
+ SequencedWorkerPool::WorkerShutdown shutdown_behavior, |
+ const tracked_objects::Location& from_here, |
+ const base::Closure& task); |
+ |
+ void Shutdown(); |
+ |
+ void SetTestingObserver(SequencedWorkerPool::TestingObserver* observer); |
+ |
+ private: |
+ // Attempts to schedule work on an available thread. Call this when a |
+ // task is added to the pending_tasks_ list or a thread is added to the |
+  // idle_threads_ list. The lock must already be held when calling this. |
+ void ScheduleWork(); |
+ |
+ // Worker task that actually runs on a worker thread to execute the given |
+ // task. The worker it's running on is passed as the first argument. |
+ void RunTask(Worker* this_worker, const SequencedTask& task); |
+ |
+ // Returns true if any worker is running a shutdown-blocking task. This |
+ // includes BLOCK_SHUTDOWN and also SKIP_ON_SHUTDOWN tasks that are already |
+ // running (since they must be run to completion). It must be called with |
+ // the lock held. |
+ bool IsRunningBlockingTask() const; |
+ |
+  // The last sequence number used. Managed by GetSequenceToken; since that |
+  // only does thread-safe atomic increments, the lock does not need to be |
+  // held. |
+ volatile base::subtle::Atomic32 last_sequence_number_; |
+ |
+ // This lock protects |everything in this class|. Do not read or modify |
+ // anything without holding this lock. Do not block while holding this |
+ // lock. |
+ base::Lock lock_; |
+ |
+ // The maximum number of worker threads we'll create. |
+ size_t max_threads_; |
+ |
+ // Owning pointers to all threads we've created so far. Since we lazily |
+ // create threads, this may be less than max_threads_ and will be initially |
+ // empty. Each of these pointers will also appear either in idle_threads_ |
+ // or running_threads_. |
+ std::vector<linked_ptr<Worker> > threads_; |
+ |
+  // Lists all currently idle worker threads. These pointers are non-owning; |
+  // the threads_ vector manages their lifetimes. |
+ std::deque<Worker*> idle_threads_; |
jar (doing other things)
2011/11/23 20:08:16
You should use a stack, as I don't think there is
|
+ |
+ // The opposite of idle_threads_, this contains non-owning pointers to all |
+ // currently running or scheduled threads. |
+ std::set<Worker*> running_threads_; |
+ |
+ // In-order list of all pending tasks. These are tasks waiting for a thread |
+ // to run on or that are blocked on a previous task in their sequence. |
+ // |
+ // We maintain the pending_task_count_ separately for metrics because |
+ // list.size() can be linear time. |
+ std::list<SequencedTask> pending_tasks_; |
+ size_t pending_task_count_; |
+ |
+ // Lists all sequence tokens currently executing. |
+ std::set<int> current_sequences_; |
+ |
+ // Set when the app is terminating and no further tasks should be allowed. |
+ bool terminated_; |
+ |
+ SequencedWorkerPool::TestingObserver* testing_observer_; |
+ |
+  // Created lazily when terminated_ is set and there are pending tasks; this |
+  // is signaled by ScheduleWork when all shutdown-blocking tasks have |
+  // completed. |
+ scoped_ptr<base::WaitableEvent> shutdown_complete_; |
+}; |
+ |
+SequencedWorkerPool::Inner::Inner(size_t max_threads) |
+ : last_sequence_number_(0), |
+ max_threads_(max_threads), |
+ pending_task_count_(0), |
+      terminated_(false), |
+      testing_observer_(NULL) { |
+} |
+ |
+SequencedWorkerPool::SequenceToken |
+SequencedWorkerPool::Inner::GetSequenceToken() { |
jar (doing other things)
2011/11/23 20:08:16
Cool as this is... I'm suspicious that we need an
brettw
2011/11/23 21:01:13
I think an enum destroys the whole point of this p
|
+ base::subtle::Atomic32 result = |
+ base::subtle::NoBarrier_AtomicIncrement(&last_sequence_number_, 1); |
+ return SequenceToken(static_cast<int>(result)); |
+} |
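As context for the discussion above, a minimal caller sketch using the public API defined at the bottom of this file; SomeFileWork() and MoreFileWork() are placeholder functions, not part of the patch:

    void SomeFileWork();  // placeholder
    void MoreFileWork();  // placeholder

    void PostOrderedFileWork(SequencedWorkerPool* pool) {
      SequencedWorkerPool::SequenceToken token = pool->GetSequenceToken();
      // Tasks sharing a token run in the order posted, though not necessarily
      // on the same physical thread; tasks with other tokens may run in
      // parallel.
      pool->PostSequencedWorkerTask(token, SequencedWorkerPool::BLOCK_SHUTDOWN,
                                    FROM_HERE, base::Bind(&SomeFileWork));
      pool->PostSequencedWorkerTask(token, SequencedWorkerPool::BLOCK_SHUTDOWN,
                                    FROM_HERE, base::Bind(&MoreFileWork));
    }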
+ |
+bool SequencedWorkerPool::Inner::PostTask( |
+ int sequence_token_id, |
+ SequencedWorkerPool::WorkerShutdown shutdown_behavior, |
+ const tracked_objects::Location& from_here, |
+ const base::Closure& task) { |
+ base::AutoLock lock(lock_); |
+ |
+ if (terminated_) |
+ return false; |
+ |
+ SequencedTask sequenced; |
+ sequenced.sequence_token_id = sequence_token_id; |
+ sequenced.shutdown_behavior = shutdown_behavior; |
+ sequenced.location = from_here; |
+ sequenced.task = task; |
+ pending_tasks_.push_back(sequenced); |
+ pending_task_count_++; |
+ |
+ ScheduleWork(); |
+ return true; |
+} |
+ |
+void SequencedWorkerPool::Inner::Shutdown() { |
jar (doing other things)
2011/11/23 20:08:16
This function is written as a blocking call. I mu
brettw
2011/11/23 21:01:13
This has to be the main thread. It has to block si
|
+  // Mark us as terminated and drop all pending tasks that aren't required |
+  // to run on shutdown. Since no new tasks will be posted once the |
+  // terminated_ flag is set, this ensures that every remaining task is |
+  // required for shutdown. |
+ { |
+ base::AutoLock lock(lock_); |
+ DCHECK(!terminated_); |
+ terminated_ = true; |
+ |
+ std::list<SequencedTask>::iterator i = pending_tasks_.begin(); |
+ while (i != pending_tasks_.end()) { |
+ if (i->shutdown_behavior == BLOCK_SHUTDOWN) { |
+ i++; |
+ } else { |
+ i = pending_tasks_.erase(i); |
jar (doing other things)
2011/11/23 20:08:16
Does this potentially induce a task destructor? D
|
+ pending_task_count_--; |
+ } |
+ } |
+ DCHECK_EQ(pending_tasks_.size(), pending_task_count_); |
+ |
+ if (pending_tasks_.empty() && !IsRunningBlockingTask()) { |
+ // There are no pending or running tasks blocking shutdown, we're done. |
+ return; |
+ } |
+ |
+ // Need to wait for some tasks, create the event. |
+ DCHECK(!shutdown_complete_.get()); |
+ shutdown_complete_.reset(new base::WaitableEvent(false, false)); |
+ } |
+ |
+ // If we get here, we know we're either waiting on a blocking task that's |
+ // currently running, waiting on a blocking task that hasn't been scheduled |
+ // yet, or both. Block on the "queue empty" event to know when all tasks are |
+ // complete. This must be done outside the lock. |
+ if (testing_observer_) |
+ testing_observer_->WillWaitForShutdown(); |
+ |
+ base::TimeTicks shutdown_wait_begin = base::TimeTicks::Now(); |
+ shutdown_complete_->Wait(); |
+ UMA_HISTOGRAM_TIMES("SequencedWorkerPool.ShutdownDelayTime", |
+ base::TimeTicks::Now() - shutdown_wait_begin); |
+} |
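To make the shutdown contract implemented above concrete, a hedged usage sketch; SomeCacheWork() and CriticalFlush() are placeholders, not part of the patch:

    void SomeCacheWork();  // placeholder
    void CriticalFlush();  // placeholder

    void PostThenShutdown(SequencedWorkerPool* pool) {
      // Dropped by Shutdown() if it has not started running yet.
      pool->PostWorkerTask(SequencedWorkerPool::CONTINUE_ON_SHUTDOWN,
                           FROM_HERE, base::Bind(&SomeCacheWork));
      // Survives shutdown; Shutdown() blocks until it has run.
      pool->PostWorkerTask(SequencedWorkerPool::BLOCK_SHUTDOWN,
                           FROM_HERE, base::Bind(&CriticalFlush));
      // Waits for the BLOCK_SHUTDOWN task above, plus any SKIP_ON_SHUTDOWN
      // task that is already running when shutdown starts.
      pool->Shutdown();
    }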
+ |
+void SequencedWorkerPool::Inner::SetTestingObserver( |
+ SequencedWorkerPool::TestingObserver* observer) { |
+ base::AutoLock lock(lock_); |
+ testing_observer_ = observer; |
+} |
+ |
+void SequencedWorkerPool::Inner::ScheduleWork() { |
jar (doing other things)
2011/11/23 20:08:16
You might consider adding a comment that this func
|
+ lock_.AssertAcquired(); |
+ |
+ DCHECK_EQ(pending_tasks_.size(), pending_task_count_); |
+ UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.TaskCount", |
+ pending_task_count_); |
+ |
+ if (terminated_ && shutdown_complete_.get()) { |
+ // When the app is terminating, check for "no more blocking work" and |
+ // signal if shutdown tasks are complete. |
+ if (pending_tasks_.empty() && !IsRunningBlockingTask()) { |
+ shutdown_complete_->Signal(); |
+ return; |
+ } |
+ } |
+ |
+ if (pending_tasks_.empty() || |
+ (idle_threads_.empty() && threads_.size() == max_threads_)) |
+ return; // No work to schedule or no threads to schedule them on. |
+ |
+ // Find the next task with a sequence token that's not currently in use. |
+ // If the token is in use, that means another thread is running something |
+ // in that sequence, and we can't run it without going out-of-order. |
+ // |
+ // This algorithm is simple and fair, but inefficient in some cases. For |
+ // example, say somebody schedules 1000 slow tasks with the same sequence |
+ // number. We'll have to go through all those tasks each time we feel like |
+ // there might be work to schedule. If this proves to be a problem, we |
+ // should make this more efficient. |
+ // |
+ // One possible enhancement would be to keep a map from sequence ID to a |
+ // list of pending but currently blocked SequencedTasks for that ID. |
+ // When a worker finishes a task of one sequence token, it can pick up the |
+ // next one from that token right away. |
jar (doing other things)
2011/11/23 20:08:16
That is a good optimization for efficiency... but
|
+ // |
+ // This may lead to starvation if there are sufficient numbers of sequences |
+ // in use. To alleviate this, we could add an incrementing priority counter |
+ // to each SequencedTask. Then maintain a priority_queue of all runnable |
+ // tasks, sorted by priority counter. When a sequenced task is completed |
+  // we would pop the head element off of that token's pending list and add it |
+ // to the priority queue. Then we would run the first item in the priority |
+ // queue. |
+ int unrunnable_tasks = 0; |
+ for (std::list<SequencedTask>::iterator i = pending_tasks_.begin(); |
+ i != pending_tasks_.end(); ++i) { |
+ if (!i->sequence_token_id || |
+ current_sequences_.find(i->sequence_token_id) == |
+ current_sequences_.end()) { |
+      // This token is free; run this task on the first available worker, |
+ // creating one if necessary. |
+ Worker* worker; |
jar (doing other things)
2011/11/23 20:08:16
Should this be a linked_ptr?
|
+ if (idle_threads_.empty()) { |
+ // We should have early exited above if we're out of threads to |
+ // schedule, so there should always be a free slot. |
+ DCHECK(threads_.size() < max_threads_); |
+ worker = new Worker(threads_.size()); |
+ threads_.push_back(linked_ptr<Worker>(worker)); |
+ } else { |
+ worker = idle_threads_.front(); |
+ idle_threads_.pop_front(); |
+ } |
+ running_threads_.insert(worker); |
+ |
+ // Mark the task's sequence number as in use. |
+ if (i->sequence_token_id) |
+ current_sequences_.insert(i->sequence_token_id); |
+ |
+ worker->PostTask(*i, base::Bind(&Inner::RunTask, this, worker, *i)); |
jar (doing other things)
2011/11/23 20:08:16
Now that we have the worker thread out of the idle
|
+ pending_tasks_.erase(i); |
+ pending_task_count_--; |
+ break; |
+ } |
+ unrunnable_tasks++; |
+ } |
+ |
+  // Track the number of tasks we had to skip over to see if we should be |
+  // making this more efficient. If this number is often large, or frequently |
+  // nonzero, we should consider the optimization above. |
+ UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.UnrunnableTaskCount", |
+ unrunnable_tasks); |
+} |
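A rough sketch of the data structures the comment inside ScheduleWork() proposes (per-token lists of blocked tasks plus a post-order priority queue). Everything here is illustrative; the names are invented and nothing below is part of the patch:

    #include <map>
    #include <queue>
    #include <vector>

    #include "base/basictypes.h"

    // A task whose sequence token is currently free, tagged with the order
    // in which it was posted so older tasks are preferred.
    struct RunnableTask {
      int64 post_order;  // assigned from an incrementing counter at post time
      SequencedTask task;
    };

    struct OlderFirst {
      bool operator()(const RunnableTask& a, const RunnableTask& b) const {
        return a.post_order > b.post_order;  // smallest post_order on top
      }
    };

    // These would live as members of Inner, guarded by lock_:
    // tasks blocked behind a running task with the same sequence token,
    // kept FIFO per token...
    std::map<int, std::queue<SequencedTask> > blocked_by_token;
    // ...and runnable tasks ordered oldest-first, which keeps heavily used
    // tokens from starving everyone else.
    std::priority_queue<RunnableTask, std::vector<RunnableTask>, OlderFirst>
        runnable_tasks;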
+ |
+void SequencedWorkerPool::Inner::RunTask(Worker* this_worker, |
+ const SequencedTask& task) { |
+ task.task.Run(); |
+ |
+ // Now that this thread is free, mark ourselves and try to schedule more. |
+ { |
+ base::AutoLock lock(lock_); |
+ this_worker->WorkComplete(); |
+ |
+ if (task.sequence_token_id) |
+ current_sequences_.erase(task.sequence_token_id); |
+ running_threads_.erase(this_worker); |
+ idle_threads_.push_front(this_worker); |
+ |
+ ScheduleWork(); |
+ } |
+} |
+ |
+bool SequencedWorkerPool::Inner::IsRunningBlockingTask() const { |
+ lock_.AssertAcquired(); |
+ |
+ for (std::set<Worker*>::const_iterator i = running_threads_.begin(); |
+ i != running_threads_.end(); ++i) { |
+ if ((*i)->current_shutdown_mode() == SequencedWorkerPool::BLOCK_SHUTDOWN || |
+ (*i)->current_shutdown_mode() == SequencedWorkerPool::SKIP_ON_SHUTDOWN) |
+ return true; |
+ } |
+ return false; |
+} |
+ |
+// SequencedWorkerPool -------------------------------------------------------- |
+ |
+SequencedWorkerPool::SequencedWorkerPool(size_t max_threads) |
+ : inner_(new Inner(max_threads)) { |
+} |
+ |
+SequencedWorkerPool::~SequencedWorkerPool() { |
+} |
+ |
+SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetSequenceToken() { |
+ return inner_->GetSequenceToken(); |
+} |
+ |
+bool SequencedWorkerPool::PostWorkerTask( |
+ WorkerShutdown shutdown_behavior, |
+ const tracked_objects::Location& from_here, |
+ const base::Closure& task) { |
+ return inner_->PostTask(0, shutdown_behavior, from_here, task); |
+} |
+ |
+bool SequencedWorkerPool::PostSequencedWorkerTask( |
+ SequenceToken sequence_token, |
+ WorkerShutdown shutdown_behavior, |
+ const tracked_objects::Location& from_here, |
+ const base::Closure& task) { |
+ return inner_->PostTask(sequence_token.id_, shutdown_behavior, |
+ from_here, task); |
+} |
+ |
+void SequencedWorkerPool::Shutdown() { |
+ inner_->Shutdown(); |
+} |
+ |
+void SequencedWorkerPool::SetTestingObserver(TestingObserver* observer) { |
+ inner_->SetTestingObserver(observer); |
+} |
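Finally, a sketch of how a test might use SetTestingObserver(). Only WillWaitForShutdown() is visible in this file, so the sketch assumes it is the interface's sole (virtual) method; the header would need to confirm that:

    class ShutdownWaitChecker : public SequencedWorkerPool::TestingObserver {
     public:
      ShutdownWaitChecker() : will_wait_called_(false) {}

      // Invoked by Inner::Shutdown() just before it blocks on outstanding
      // shutdown-blocking tasks.
      virtual void WillWaitForShutdown() {
        will_wait_called_ = true;
      }

      bool will_wait_called() const { return will_wait_called_; }

     private:
      bool will_wait_called_;
    };

A test would install the observer with pool->SetTestingObserver(&checker) before posting a BLOCK_SHUTDOWN task and calling pool->Shutdown(), then assert that will_wait_called() returned true.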