Index: content/common/sequenced_worker_pool.cc |
=================================================================== |
--- content/common/sequenced_worker_pool.cc (revision 0) |
+++ content/common/sequenced_worker_pool.cc (revision 0) |
@@ -0,0 +1,489 @@ |
+// Copyright (c) 2011 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "content/common/sequenced_worker_pool.h" |
+ |
+#include <deque> |
+#include <set> |
+ |
+#include "base/atomicops.h" |
+#include "base/bind.h" |
+#include "base/memory/scoped_ptr.h" |
+#include "base/metrics/histogram.h" |
+#include "base/threading/thread.h" |
+#include "base/stringprintf.h" |
+#include "base/synchronization/condition_variable.h" |
+#include "base/synchronization/waitable_event.h" |
+#include "base/threading/simple_thread.h" |
+ |
namespace {

// A unit of work queued in the pool: the closure to run plus the
// bookkeeping needed to sequence it and to classify it at shutdown.
struct SequencedTask {
  int sequence_token_id;  // 0 means the task is not part of any sequence.
  SequencedWorkerPool::WorkerShutdown shutdown_behavior;
  tracked_objects::Location location;  // Posting site, for diagnostics.
  base::Closure task;
};

}  // namespace
+ |
+// Worker --------------------------------------------------------------------- |
+ |
// A single pool thread. Construction starts the underlying OS thread;
// Inner retains ownership of all Workers and joins them in its destructor.
class SequencedWorkerPool::Worker : public base::SimpleThread {
 public:
  // Starts the thread immediately. |thread_number| is only used to build
  // the thread's debug name.
  Worker(SequencedWorkerPool::Inner* inner, int thread_number);
  ~Worker();

  // SimpleThread implementation. This actually runs the background thread.
  virtual void Run();

 private:
  SequencedWorkerPool::Inner* inner_;  // Not owned; Inner owns us.
  // NOTE(review): only ever set in the constructor in this file; confirm
  // whether it is meant to be updated per-task before relying on it.
  SequencedWorkerPool::WorkerShutdown current_shutdown_mode_;

  DISALLOW_COPY_AND_ASSIGN(Worker);
};
+ |
+ |
+// Inner ---------------------------------------------------------------------- |
+ |
// Ref-counted implementation of the pool. All state is guarded by |lock_|
// unless a member's comment says otherwise.
class SequencedWorkerPool::Inner
    : public base::RefCountedThreadSafe<SequencedWorkerPool::Inner> {
 public:
  // |max_threads| caps how many worker threads will ever be created.
  Inner(size_t max_threads);
  virtual ~Inner();

  // Backends for SequenceWorkerPool.
  SequenceToken GetSequenceToken();
  SequenceToken GetNamedSequenceToken(const std::string& name);
  bool PostTask(int sequence_token_id,
                SequencedWorkerPool::WorkerShutdown shutdown_behavior,
                const tracked_objects::Location& from_here,
                const base::Closure& task);
  void Shutdown();
  void SetTestingObserver(SequencedWorkerPool::TestingObserver* observer);

  // Runs the worker loop on the background thread.
  void ThreadLoop(Worker* this_worker);

 private:
  // Pops the next runnable task into |*task| and returns true, or returns
  // false when nothing is currently runnable. Requires |lock_| held.
  bool GetWork(SequencedTask* task);

  // Bracket the execution of one task; both require |lock_| held.
  void WillRunWorkerTask(Worker* worker, const SequencedTask& task);
  void DidRunWorkerTask(Worker* worker, const SequencedTask& task);

  // Returns true if there are no threads currently running the given
  // sequence token.
  bool IsSequenceTokenRunnable(int sequence_token_id) const;

  // Creates an additional worker thread if all threads are busy and the
  // addition of one more could run an additional task waiting in the queue.
  // Returns true if a thread was created.
  bool StartAdditionalThreadIfHelpful();

  // The last sequence number used. Managed by GetSequenceToken, since this
  // only does threadsafe increment operations, you do not need to hold the
  // lock.
  volatile base::subtle::Atomic32 last_sequence_number_;

  // This lock protects |everything in this class|. Do not read or modify
  // anything without holding this lock. Do not block while holding this
  // lock.
  base::Lock lock_;

  // Condition variable used to wake up worker threads when a task is runnable.
  base::ConditionVariable cond_var_;

  // The maximum number of worker threads we'll create.
  size_t max_threads_;

  // Associates all known sequence token names with their IDs.
  std::map<std::string, int> named_sequence_tokens_;

  // Owning pointers to all threads we've created so far. Since we lazily
  // create threads, this may be less than max_threads_ and will be initially
  // empty.
  // NOTE(review): a reviewer suggested a plain thread count would suffice,
  // but the actual SimpleThread pointers are needed so ~Inner can Join()
  // each thread -- confirm before simplifying.
  std::vector<linked_ptr<Worker> > threads_;

  // Number of threads currently running tasks.
  size_t running_thread_count_;

  // Number of threads currently running tasks that have the BLOCK_SHUTDOWN
  // flag set.
  size_t blocking_shutdown_thread_count_;

  // In-order list of all pending tasks. These are tasks waiting for a thread
  // to run on or that are blocked on a previous task in their sequence.
  //
  // We maintain the pending_task_count_ separately for metrics because
  // list.size() can be linear time.
  std::list<SequencedTask> pending_tasks_;
  size_t pending_task_count_;

  // Number of tasks in the pending_tasks_ list that are marked as blocking
  // shutdown.
  size_t blocking_shutdown_pending_task_count_;

  // Lists all sequence tokens currently executing.
  std::set<int> current_sequences_;

  // Set when the app is terminating and no further tasks should be allowed,
  // though we may still be running existing tasks.
  bool terminating_;

  // Set when the worker pool is being destroyed, and all worker threads should
  // exit.
  bool exit_workers_;

  // Notified just before Shutdown() blocks waiting for tasks. Not owned.
  // NOTE(review): never initialized by the constructor as written, so it is
  // read uninitialized if SetTestingObserver() is never called -- verify.
  SequencedWorkerPool::TestingObserver* testing_observer_;

  // Created lazily when terminating_ is set and there are pending tasks; it
  // is signaled by DidRunWorkerTask when all blocking tasks have completed.
  scoped_ptr<base::WaitableEvent> shutdown_complete_;
};
+ |
// Constructs the worker and immediately starts the underlying OS thread.
// The thread name embeds |thread_number| to make debugging easier.
SequencedWorkerPool::Worker::Worker(SequencedWorkerPool::Inner* inner,
                                    int thread_number)
    : base::SimpleThread(
        StringPrintf("BrowserWorker%d", thread_number).c_str()),
      inner_(inner),
      current_shutdown_mode_(SequencedWorkerPool::CONTINUE_ON_SHUTDOWN) {
  // Kick off the thread; it will enter Run() below.
  Start();
}
+ |
SequencedWorkerPool::Worker::~Worker() {
}

// Background-thread entry point.
void SequencedWorkerPool::Worker::Run() {
  // Just jump back to the Inner object to run the thread, since it has all the
  // tracking information and queues. It might be more natural to implement
  // using DelegateSimpleThread and have Inner implement the Delegate to avoid
  // having these worker objects at all, but that method lacks the ability to
  // send thread-specific information easily to the thread loop.
  inner_->ThreadLoop(this);
}
+ |
+SequencedWorkerPool::Inner::Inner(size_t max_threads) |
+ : last_sequence_number_(0), |
+ lock_(), |
+ cond_var_(&lock_), |
+ max_threads_(max_threads), |
+ running_thread_count_(0), |
+ blocking_shutdown_thread_count_(0), |
+ pending_task_count_(0), |
+ blocking_shutdown_pending_task_count_(0), |
+ terminating_(false), |
+ exit_workers_(false) { |
+} |
+ |
SequencedWorkerPool::Inner::~Inner() {
  {
    base::AutoLock lock(lock_);

    // Tell all workers to exit. The Worker destructor will actually join with
    // each corresponding thread when the object is torn down.
    exit_workers_ = true;
    // Wake every worker: idle ones are blocked in cond_var_.Wait() and must
    // observe |exit_workers_| to leave ThreadLoop.
    cond_var_.Broadcast();
  }

  // Need to explicitly join with the threads before they're destroyed or else
  // they will be running when our object is half torn down.
  for (size_t i = 0; i < threads_.size(); i++)
    threads_[i]->Join();
  threads_.clear();
}
+ |
+SequencedWorkerPool::SequenceToken |
+SequencedWorkerPool::Inner::GetSequenceToken() { |
+ base::subtle::Atomic32 result = |
+ base::subtle::NoBarrier_AtomicIncrement(&last_sequence_number_, 1); |
+ return SequenceToken(static_cast<int>(result)); |
+} |
+ |
+SequencedWorkerPool::SequenceToken |
+SequencedWorkerPool::Inner::GetNamedSequenceToken( |
+ const std::string& name) { |
+ base::AutoLock lock(lock_); |
+ std::map<std::string, int>::const_iterator found = |
+ named_sequence_tokens_.find(name); |
+ if (found != named_sequence_tokens_.end()) |
+ return SequenceToken(found->second); // Got an existing one. |
+ |
+ // Create a new one for this name. |
+ SequenceToken result = GetSequenceToken(); |
+ named_sequence_tokens_.insert(std::make_pair(name, result.id_)); |
+ return result; |
+} |
+ |
+bool SequencedWorkerPool::Inner::PostTask( |
+ int sequence_token_id, |
+ SequencedWorkerPool::WorkerShutdown shutdown_behavior, |
+ const tracked_objects::Location& from_here, |
+ const base::Closure& task) { |
+ base::AutoLock lock(lock_); |
+ |
+ if (terminating_) |
+ return false; |
+ |
+ SequencedTask sequenced; |
+ sequenced.sequence_token_id = sequence_token_id; |
+ sequenced.shutdown_behavior = shutdown_behavior; |
+ sequenced.location = from_here; |
+ sequenced.task = task; |
+ |
+ pending_tasks_.push_back(sequenced); |
+ pending_task_count_++; |
+ if (shutdown_behavior == BLOCK_SHUTDOWN) |
+ blocking_shutdown_pending_task_count_++; |
+ |
+ if (!StartAdditionalThreadIfHelpful()) { |
+ // Wake up a worker thread to run this. |
+ cond_var_.Signal(); |
jar (doing other things)
2011/11/29 18:44:15
nit: Nicer to signal when after you release the lo
|
+ } |
+ |
+ return true; |
+} |
+ |
void SequencedWorkerPool::Inner::Shutdown() {
  // Mark us as terminated. Once the terminated flag is set no new tasks are
  // accepted, and GetWork will drop any queued task that is not marked
  // BLOCK_SHUTDOWN as workers encounter it, so everything that remains is
  // required for shutdown.
  {
    base::AutoLock lock(lock_);
    DCHECK(!terminating_);
    terminating_ = true;

    if (blocking_shutdown_thread_count_ == 0 &&
        blocking_shutdown_pending_task_count_ == 0) {
      // There are no pending or running tasks blocking shutdown, we're done.
      return;
    }

    // Need to wait for some tasks, create the event.
    DCHECK(!shutdown_complete_.get());
    shutdown_complete_.reset(new base::WaitableEvent(false, false));
  }

  // If we get here, we know we're either waiting on a blocking task that's
  // currently running, waiting on a blocking task that hasn't been scheduled
  // yet, or both. Block on the "queue empty" event to know when all tasks are
  // complete. This must be done outside the lock.
  //
  // NOTE(review): |testing_observer_| is read here without |lock_| held, and
  // the constructor as written never initializes it -- confirm callers always
  // invoke SetTestingObserver() first, or initialize it in the constructor.
  if (testing_observer_)
    testing_observer_->WillWaitForShutdown();

  base::TimeTicks shutdown_wait_begin = base::TimeTicks::Now();
  shutdown_complete_->Wait();
  UMA_HISTOGRAM_TIMES("SequencedWorkerPool.ShutdownDelayTime",
                      base::TimeTicks::Now() - shutdown_wait_begin);
}
+ |
// Stores the observer Shutdown() will notify before it blocks. The pointer
// is not owned.
void SequencedWorkerPool::Inner::SetTestingObserver(
    SequencedWorkerPool::TestingObserver* observer) {
  base::AutoLock lock(lock_);
  testing_observer_ = observer;
}
+ |
// Main loop for every worker thread. |lock_| is held for the whole loop;
// it is released only while a task's closure actually runs (AutoUnlock)
// and, implicitly, while sleeping in cond_var_.Wait().
void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
  base::AutoLock lock(lock_);
  while (!exit_workers_) {
    SequencedTask task;
    if (GetWork(&task)) {
      WillRunWorkerTask(this_worker, task);  // Must be done inside the lock.
      {
        // Drop the lock while the closure runs so other workers and posters
        // can make progress.
        base::AutoUnlock unlock(lock_);
        task.task.Run();
      }
      DidRunWorkerTask(this_worker, task);  // Must be done inside the lock.
    } else {
      // Nothing runnable right now: sleep until PostTask or the destructor
      // signals the condition variable.
      cond_var_.Wait();
    }
  }
}
+ |
+bool SequencedWorkerPool::Inner::GetWork(SequencedTask* task) { |
+ lock_.AssertAcquired(); |
+ |
+ DCHECK_EQ(pending_tasks_.size(), pending_task_count_); |
+ UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.TaskCount", |
+ pending_task_count_); |
+ |
+ // Find the next task with a sequence token that's not currently in use. |
+ // If the token is in use, that means another thread is running something |
+ // in that sequence, and we can't run it without going out-of-order. |
+ // |
+ // This algorithm is simple and fair, but inefficient in some cases. For |
+ // example, say somebody schedules 1000 slow tasks with the same sequence |
+ // number. We'll have to go through all those tasks each time we feel like |
+ // there might be work to schedule. If this proves to be a problem, we |
+ // should make this more efficient. |
+ // |
+ // One possible enhancement would be to keep a map from sequence ID to a |
+ // list of pending but currently blocked SequencedTasks for that ID. |
+ // When a worker finishes a task of one sequence token, it can pick up the |
+ // next one from that token right away. |
+ // |
+ // This may lead to starvation if there are sufficient numbers of sequences |
+ // in use. To alleviate this, we could add an incrementing priority counter |
+ // to each SequencedTask. Then maintain a priority_queue of all runnable |
+ // tasks, sorted by priority counter. When a sequenced task is completed |
+ // we would pop the head element off of that tasks pending list and add it |
+ // to the priority queue. Then we would run the first item in the priority |
+ // queue. |
+ bool found_task = false; |
+ int unrunnable_tasks = 0; |
+ std::list<SequencedTask>::iterator i = pending_tasks_.begin(); |
+ while (i != pending_tasks_.end()) { |
+ if (IsSequenceTokenRunnable(i->sequence_token_id)) { |
+ if (terminating_ && i->shutdown_behavior != BLOCK_SHUTDOWN) { |
+ // We're shutting down and the task we just found isn't blocking |
+ // shutdown. Delete it and get more work. |
+ // |
+ // Note that we do not want to delete unrunnable tasks. Deleting a task |
+ // can have side effects (like freeing some objects) and deleting a |
+ // task that's supposed to run after one that's currently running could |
+ // cause an obscure crash. |
+ i = pending_tasks_.erase(i); |
jar (doing other things)
2011/11/29 18:44:15
So you decided to leak the less important tasks?
brettw
2011/12/01 02:44:57
This is the worker thread, but I forgot to do it o
|
+ pending_task_count_--; |
jar (doing other things)
2011/11/29 18:44:15
if you add a "continue;" here, you won't need ane
|
+ } else { |
+ // Found a runnable task. |
+ *task = *i; |
+ i = pending_tasks_.erase(i); |
+ pending_task_count_--; |
+ if (task->shutdown_behavior == BLOCK_SHUTDOWN) |
+ blocking_shutdown_pending_task_count_--; |
jar (doing other things)
2011/11/29 18:44:15
Lines 353-355 are critical accounting lines, and k
brettw
2011/12/01 02:44:57
It does not seem clear to me that there's an obvio
|
+ |
+ found_task = true; |
+ break; |
+ } |
+ } else { |
+ unrunnable_tasks++; |
+ ++i; |
jar (doing other things)
2011/11/29 18:44:15
This was harder to read than needed, because this
|
+ } |
+ } |
+ |
+ // Track the number of tasks we had to skip over to see if we should be |
+ // making this more efficient. If this number ever becomes large or is |
+ // frequently "some", we should consider the optimization above. |
+ UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.UnrunnableTaskCount", |
+ unrunnable_tasks); |
+ return found_task; |
+} |
+ |
// Accounting done just before a worker runs |task|. Must be called with
// |lock_| held. NOTE: |worker| is not used by the current implementation.
void SequencedWorkerPool::Inner::WillRunWorkerTask(Worker* worker,
                                                   const SequencedTask& task) {
  lock_.AssertAcquired();

  // Mark the task's sequence number as in use.
  if (task.sequence_token_id)
    current_sequences_.insert(task.sequence_token_id);

  running_thread_count_++;

  if (task.shutdown_behavior == SequencedWorkerPool::BLOCK_SHUTDOWN)
    blocking_shutdown_thread_count_++;

  // We just picked up a task. Since StartAdditionalThreadIfHelpful only
  // creates a new thread if there is no free one, there is a race when posting
  // tasks that many tasks could have been posted before a thread started
  // running them, so only one thread would have been created. So we also check
  // for more threads after removing our task from the queue, which also has
  // the nice side effect of creating the workers from background threads
  // rather than the main thread of the app.
  //
  // If another thread wasn't created, we want to wake up an existing thread
  // if there is one waiting to pick up the next task.
  if (!StartAdditionalThreadIfHelpful() && !pending_tasks_.empty() &&
      running_thread_count_ < threads_.size())
    cond_var_.Signal();
}
+ |
// Undoes the accounting of WillRunWorkerTask after |task| finishes, and
// releases Shutdown() if this was the last shutdown-blocking work. Must be
// called with |lock_| held. NOTE: |worker| is not used by the current
// implementation.
void SequencedWorkerPool::Inner::DidRunWorkerTask(Worker* worker,
                                                  const SequencedTask& task) {
  lock_.AssertAcquired();

  if (task.shutdown_behavior == SequencedWorkerPool::BLOCK_SHUTDOWN) {
    DCHECK(blocking_shutdown_thread_count_ > 0);
    blocking_shutdown_thread_count_--;
  }

  // The sequence is no longer in flight; tasks with the same token become
  // runnable again.
  if (task.sequence_token_id)
    current_sequences_.erase(task.sequence_token_id);

  running_thread_count_--;

  // |shutdown_complete_| only exists if Shutdown() is blocked waiting for
  // us (see Shutdown()); wake it when nothing blocking remains.
  if (terminating_ && shutdown_complete_.get() &&
      blocking_shutdown_pending_task_count_ == 0 &&
      blocking_shutdown_thread_count_ == 0) {
    // No more blocking shutdown work.
    shutdown_complete_->Signal();
  }
}
+ |
+bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable( |
+ int sequence_token_id) const { |
+ lock_.AssertAcquired(); |
+ return !sequence_token_id || |
+ current_sequences_.find(sequence_token_id) == |
+ current_sequences_.end(); |
+} |
+ |
+bool SequencedWorkerPool::Inner::StartAdditionalThreadIfHelpful() { |
+ if (threads_.size() < max_threads_ && |
+ running_thread_count_ == threads_.size()) { |
+ // We could use an additional thread if there's work to be done. |
+ for (std::list<SequencedTask>::iterator i = pending_tasks_.begin(); |
jar (doing other things)
2011/11/29 18:44:15
I was hoping/expecting that you wouldn't need to s
brettw
2011/12/01 02:44:57
I don't want to re-use GetWork. It's already compl
|
+ i != pending_tasks_.end(); ++i) { |
+ if (IsSequenceTokenRunnable(i->sequence_token_id)) { |
+ // Found a runnable task, start a thread. |
+ threads_.push_back(linked_ptr<Worker>( |
+ new Worker(this, threads_.size()))); |
+ return true; |
+ } |
+ } |
+ } |
+ return false; |
+} |
+ |
+// SequencedWorkerPool -------------------------------------------------------- |
+ |
// The pool itself is a thin shell around the ref-counted Inner, which holds
// all state and may outlive this object while worker threads wind down.
SequencedWorkerPool::SequencedWorkerPool(size_t max_threads)
    : inner_(new Inner(max_threads)) {
}

SequencedWorkerPool::~SequencedWorkerPool() {
}
+ |
// Forwards to Inner; see Inner::GetSequenceToken.
SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetSequenceToken() {
  return inner_->GetSequenceToken();
}

// Forwards to Inner; see Inner::GetNamedSequenceToken.
SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetNamedSequenceToken(
    const std::string& name) {
  return inner_->GetNamedSequenceToken(name);
}
+ |
// Posts an unsequenced task. Sequence token 0 means "no sequence": the task
// may run concurrently with any other (see Inner::IsSequenceTokenRunnable).
// Returns false if the pool is already shutting down.
bool SequencedWorkerPool::PostWorkerTask(
    WorkerShutdown shutdown_behavior,
    const tracked_objects::Location& from_here,
    const base::Closure& task) {
  return inner_->PostTask(0, shutdown_behavior, from_here, task);
}

// Posts a task that runs in order relative to other tasks posted with the
// same |sequence_token|. Returns false if the pool is already shutting down.
bool SequencedWorkerPool::PostSequencedWorkerTask(
    SequenceToken sequence_token,
    WorkerShutdown shutdown_behavior,
    const tracked_objects::Location& from_here,
    const base::Closure& task) {
  return inner_->PostTask(sequence_token.id_, shutdown_behavior,
                          from_here, task);
}
+ |
// Forwards to Inner::Shutdown, which may block until all BLOCK_SHUTDOWN
// work completes.
void SequencedWorkerPool::Shutdown() {
  inner_->Shutdown();
}

// Forwards to Inner; the observer pointer is not owned.
void SequencedWorkerPool::SetTestingObserver(TestingObserver* observer) {
  inner_->SetTestingObserver(observer);
}