| Index: base/task_scheduler/thread_pool.cc |
| diff --git a/base/task_scheduler/thread_pool.cc b/base/task_scheduler/thread_pool.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..55f17baaa4fc59ecbe3a3dd64b9db012ff4df86e |
| --- /dev/null |
| +++ b/base/task_scheduler/thread_pool.cc |
| @@ -0,0 +1,253 @@ |
| +// Copyright 2016 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "base/task_scheduler/thread_pool.h" |
| + |
| +#include <utility> |
| + |
| +#include "base/bind.h" |
| +#include "base/bind_helpers.h" |
| +#include "base/lazy_instance.h" |
| +#include "base/logging.h" |
| +#include "base/task_scheduler/task_tracker.h" |
| + |
| +namespace base { |
| +namespace internal { |
| + |
| +namespace { |
| + |
| +// Shared PriorityQueue of the current thread's ThreadPool. Not set for |
| +// threads that don't belong to a ThreadPool. |
| +LazyInstance<ThreadLocalPointer<const PriorityQueue>>::Leaky |
| + g_current_shared_priority_queue = LAZY_INSTANCE_INITIALIZER; |
| + |
| +void PostTaskCallback(scoped_refptr<Sequence> sequence, |
|
robliao
2016/03/31 22:48:56
Wasn't this and the below going to go into utils.h
fdoray
2016/04/01 16:02:51
Yes, it was previously in utils.h|.cc because it w
robliao
2016/04/01 19:14:49
Given that PostTaskHelper is somewhat like a priva
fdoray
2016/04/01 20:16:45
PostTaskHelper is only one statement, but it indir
|
| + PriorityQueue* priority_queue, |
| + scoped_ptr<Task> task) { |
|
robliao
2016/03/31 22:48:55
Should DCHECK(task) as it's not immediately dereferenced.
fdoray
2016/04/01 16:02:51
Done. DCHECK everything to be consistent.
|
| + if (sequence->PushTask(std::move(task))) { |
| +    // |sequence| was empty before |task| was pushed into it and must |
| +    // therefore be inserted into |priority_queue|. |
| + const SequenceSortKey sequence_sort_key = sequence->GetSortKey(); |
| + priority_queue->BeginTransaction()->Push( |
| + make_scoped_ptr(new PriorityQueue::SequenceAndSortKey( |
| + std::move(sequence), sequence_sort_key))); |
| + } |
| +} |
| + |
| +// Helper for posting |task| to the provided |sequence| and |priority_queue| |
| +// conditional on |task_tracker|. Used by all TaskRunners defined in this file. |
| +void PostTaskHelper(scoped_ptr<Task> task, |
| + scoped_refptr<Sequence> sequence, |
|
robliao
2016/03/31 22:48:55
sequence, priority_queue, and task should be DCHECKed.
fdoray
2016/04/01 16:02:52
Done.
|
| + PriorityQueue* priority_queue, |
| + TaskTracker* task_tracker) { |
| + task_tracker->PostTask( |
| + Bind(&PostTaskCallback, std::move(sequence), priority_queue), |
| + std::move(task)); |
| +} |
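
For reference, the DCHECKs requested in the threads above (and which fdoray reports adding) would presumably land at the top of both helpers. This is a sketch of that follow-up, not part of the patch set shown here:

    void PostTaskCallback(scoped_refptr<Sequence> sequence,
                          PriorityQueue* priority_queue,
                          scoped_ptr<Task> task) {
      DCHECK(sequence);
      DCHECK(priority_queue);
      DCHECK(task);
      // ... body unchanged ...
    }

    void PostTaskHelper(scoped_ptr<Task> task,
                        scoped_refptr<Sequence> sequence,
                        PriorityQueue* priority_queue,
                        TaskTracker* task_tracker) {
      DCHECK(task);
      DCHECK(sequence);
      DCHECK(priority_queue);
      DCHECK(task_tracker);
      // ... body unchanged ...
    }
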
| + |
| +// A task runner that runs tasks with the PARALLEL ExecutionMode. |
| +class SchedulerParallelTaskRunner : public TaskRunner { |
| + public: |
| + SchedulerParallelTaskRunner(const TaskTraits& traits, |
| + PriorityQueue* priority_queue, |
| + TaskTracker* task_tracker) |
| + : traits_(traits), |
| + priority_queue_(priority_queue), |
| + task_tracker_(task_tracker) {} |
| + |
| + // TaskRunner: |
| + bool PostDelayedTask(const tracked_objects::Location& from_here, |
| + const Closure& closure, |
| + TimeDelta delay) override { |
| + // TODO(fdoray): Support delayed tasks. |
| + DCHECK(delay.is_zero()); |
| + PostTaskHelper(make_scoped_ptr(new Task(from_here, closure, traits_)), |
| + make_scoped_refptr(new Sequence), priority_queue_, |
| + task_tracker_); |
| + return true; |
| + } |
| + |
| + bool RunsTasksOnCurrentThread() const override { |
| + return g_current_shared_priority_queue.Get().Get() == priority_queue_; |
| + } |
| + |
| + private: |
| + ~SchedulerParallelTaskRunner() override = default; |
| + |
| + const TaskTraits traits_; |
| + PriorityQueue* const priority_queue_; |
| + TaskTracker* const task_tracker_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(SchedulerParallelTaskRunner); |
| +}; |
| + |
| +} // namespace |
| + |
| +ThreadPool::~ThreadPool() { |
| + AutoSchedulerLock auto_lock(join_for_testing_returned_lock_); |
| + DCHECK(join_for_testing_returned_); |
| +} |
| + |
| +scoped_ptr<ThreadPool> ThreadPool::CreateThreadPool( |
| + ThreadPriority thread_priority, |
| + size_t num_threads, |
| + const SchedulerWorkerThread::RanTaskFromSequenceCallback& |
| + ran_task_from_sequence_callback, |
| + TaskTracker* task_tracker) { |
| + scoped_ptr<ThreadPool> thread_pool( |
| + new ThreadPool(thread_priority, num_threads, |
| + ran_task_from_sequence_callback, task_tracker)); |
| + |
| + if (thread_pool->worker_threads_.empty()) |
|
robliao
2016/03/31 22:48:56
Does checking the thread count == num_threads make sense?
fdoray
2016/04/01 16:02:51
gab@ said that we should return a ThreadPool w/o e
robliao
2016/04/01 19:14:49
A histogram would make sense if we were able to ta
fdoray
2016/04/01 20:16:45
Done. Renamed num_threads -> max_threads.
I think
|
| + return nullptr; |
| + return thread_pool; |
| +} |
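
Following fdoray's reply above, the next patch set renames num_threads to max_threads and keeps returning a pool as long as at least one worker thread could be created. A rough sketch of that direction (the exact follow-up text is not shown in this review):

    scoped_ptr<ThreadPool> ThreadPool::CreateThreadPool(
        ThreadPriority thread_priority,
        size_t max_threads,
        const SchedulerWorkerThread::RanTaskFromSequenceCallback&
            ran_task_from_sequence_callback,
        TaskTracker* task_tracker) {
      scoped_ptr<ThreadPool> thread_pool(
          new ThreadPool(thread_priority, max_threads,
                         ran_task_from_sequence_callback, task_tracker));
      // |max_threads| is an upper bound: creation of individual worker threads
      // may fail, but the pool is still usable with fewer threads.
      if (thread_pool->worker_threads_.empty())
        return nullptr;
      return thread_pool;
    }
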
| + |
| +scoped_refptr<TaskRunner> ThreadPool::CreateTaskRunnerWithTraits( |
| + const TaskTraits& traits, |
| + ExecutionMode execution_mode) { |
| + switch (execution_mode) { |
| + case ExecutionMode::PARALLEL: |
| + return make_scoped_refptr(new SchedulerParallelTaskRunner( |
| + traits, &shared_priority_queue_, task_tracker_)); |
| + |
| + case ExecutionMode::SEQUENCED: |
| + case ExecutionMode::SINGLE_THREADED: |
| + NOTIMPLEMENTED(); |
| + return nullptr; |
| +  } |
| + |
| +  NOTREACHED(); |
| +  return nullptr; |
| +} |
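
For context, a caller would get a parallel task runner from the pool and post through the regular TaskRunner interface. Hypothetical usage, assuming a default-constructed TaskTraits and a free function SomeFunction:

    scoped_refptr<TaskRunner> task_runner =
        thread_pool->CreateTaskRunnerWithTraits(TaskTraits(),
                                                ExecutionMode::PARALLEL);
    task_runner->PostTask(FROM_HERE, Bind(&SomeFunction));

TaskRunner::PostTask() forwards to PostDelayedTask() with a zero delay, so it stays within the DCHECK(delay.is_zero()) restriction of SchedulerParallelTaskRunner until delayed tasks are supported.
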
| + |
| +void ThreadPool::ReinsertSequence(const SchedulerWorkerThread* worker_thread, |
| + scoped_refptr<Sequence> sequence, |
| + const SequenceSortKey& sequence_sort_key) { |
| + // If |worker_thread| belongs to this ThreadPool, set a flag to avoid waking |
| + // up a SchedulerWorkerThread when |sequence| is reinserted in |
| + // |shared_priority_queue_|. In such cases, |worker_thread| will soon pop a |
| + // Sequence from |shared_priority_queue_| which means that there is no need to |
| + // wake up another SchedulerWorkerThread to do so. |
|
robliao
2016/03/31 22:48:56
Update this comment with the delegated GetWork.
fdoray
2016/04/01 16:02:51
Done.
|
| + if (g_current_shared_priority_queue.Get().Get() == &shared_priority_queue_) |
| + no_wake_up_on_sequence_insertion_.Set(true); |
| + |
| + shared_priority_queue_.BeginTransaction()->Push( |
| + make_scoped_ptr(new PriorityQueue::SequenceAndSortKey( |
| + std::move(sequence), sequence_sort_key))); |
| + no_wake_up_on_sequence_insertion_.Set(false); |
| +} |
| + |
| +void ThreadPool::WaitForAllWorkerThreadsIdleForTesting() { |
| + AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_); |
| + while (idle_worker_threads_stack_.size() < worker_threads_.size()) |
| + idle_worker_threads_stack_cv_->Wait(); |
| +} |
| + |
| +void ThreadPool::JoinForTesting() { |
| + for (const auto& worker_thread : worker_threads_) |
| + worker_thread->JoinForTesting(); |
| + |
| + { |
|
robliao
2016/03/31 22:48:55
Is this scope necessary?
fdoray
2016/04/01 16:02:52
No. Removed it.
|
| + AutoSchedulerLock auto_lock(join_for_testing_returned_lock_); |
| + DCHECK(!join_for_testing_returned_); |
| + join_for_testing_returned_ = true; |
| + } |
| +} |
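
As agreed in the thread above, the inner scope in JoinForTesting() is unnecessary because the lock can simply be held until the function returns; the simplified version presumably reads:

    void ThreadPool::JoinForTesting() {
      for (const auto& worker_thread : worker_threads_)
        worker_thread->JoinForTesting();

      AutoSchedulerLock auto_lock(join_for_testing_returned_lock_);
      DCHECK(!join_for_testing_returned_);
      join_for_testing_returned_ = true;
    }
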
| + |
| +ThreadPool::ThreadPool(ThreadPriority thread_priority, |
| + size_t num_threads, |
| + const SchedulerWorkerThread::RanTaskFromSequenceCallback& |
| + ran_task_from_sequence_callback, |
| + TaskTracker* task_tracker) |
| + : shared_priority_queue_( |
| + Bind(&ThreadPool::SequenceInsertedInSharedPriorityQueueCallback, |
| + Unretained(this))), |
| + idle_worker_threads_stack_lock_(shared_priority_queue_.container_lock()), |
| + idle_worker_threads_stack_cv_( |
| + idle_worker_threads_stack_lock_.CreateConditionVariable()), |
| + task_tracker_(task_tracker) { |
| + DCHECK_GT(num_threads, 0U); |
| + DCHECK(!ran_task_from_sequence_callback.is_null()); |
| + DCHECK(task_tracker_); |
| + |
| + // |this| always outlives the worker threads to which these callbacks are |
| + // passed. |
| + const Closure main_entry_callback( |
| + Bind(&ThreadPool::MainEntryCallback, Unretained(this))); |
| + const SchedulerWorkerThread::GetWorkCallback get_work_callback( |
| + Bind(&ThreadPool::GetWorkCallback, Unretained(this))); |
| + |
| + AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_); |
| + |
| + for (size_t i = 0; i < num_threads; ++i) { |
| + scoped_ptr<SchedulerWorkerThread> worker_thread = |
| + SchedulerWorkerThread::CreateSchedulerWorkerThread( |
| + thread_priority, main_entry_callback, get_work_callback, |
| + ran_task_from_sequence_callback, task_tracker); |
| + if (worker_thread) { |
|
robliao
2016/03/31 22:48:55
If we decide to fail when we couldn't create all t
fdoray
2016/04/01 16:02:52
I think it makes sense to fail-fast because it is
|
| + idle_worker_threads_stack_.push(worker_thread.get()); |
| + worker_threads_.push_back(std::move(worker_thread)); |
| + } |
| + } |
| +} |
| + |
| +void ThreadPool::WakeUpOneThread() { |
| + AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_); |
| + |
| + if (idle_worker_threads_stack_.empty()) |
| + return; |
| + |
| + idle_worker_threads_stack_.top()->WakeUp(); |
|
robliao
2016/03/31 22:48:56
Optional: It might be more robust to wake up the t
fdoray
2016/04/01 16:02:51
Done. I don't like the fact that it requires one m
|
| + idle_worker_threads_stack_.pop(); |
| +} |
| + |
| +void ThreadPool::AddToIdleSchedulerWorkerThreadsStack( |
| + SchedulerWorkerThread* worker_thread) { |
| + AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_); |
| + idle_worker_threads_stack_.push(worker_thread); |
| + DCHECK_LE(idle_worker_threads_stack_.size(), worker_threads_.size()); |
| + |
| + if (idle_worker_threads_stack_.size() == worker_threads_.size()) |
| + idle_worker_threads_stack_cv_->Signal(); |
| +} |
| + |
| +void ThreadPool::SequenceInsertedInSharedPriorityQueueCallback() { |
| + if (!no_wake_up_on_sequence_insertion_.Get()) |
| + WakeUpOneThread(); |
| +} |
| + |
| +void ThreadPool::MainEntryCallback() const { |
| + DCHECK(!g_current_shared_priority_queue.Get().Get()); |
| + g_current_shared_priority_queue.Get().Set(&shared_priority_queue_); |
| +} |
| + |
| +scoped_refptr<Sequence> ThreadPool::GetWorkCallback( |
| + SchedulerWorkerThread* worker_thread) { |
| + scoped_ptr<PriorityQueue::Transaction> transaction( |
| + shared_priority_queue_.BeginTransaction()); |
| + const PriorityQueue::SequenceAndSortKey sequence_and_sort_key( |
| + transaction->Peek()); |
| + |
| + if (sequence_and_sort_key.is_null()) { |
| + // |transaction| is kept alive while |worker_thread| is added to |
| + // |idle_worker_threads_stack_| to avoid this scenario: |
|
robliao
2016/03/31 22:48:55
Nit: scenario -> race
fdoray
2016/04/01 16:02:52
Done.
|
| + // 1. This thread creates a Transaction, finds |shared_priority_queue_| |
| + // empty and ends the Transaction. |
| + // 2. Other thread creates a Transaction, inserts a Sequence into |
| + // |shared_priority_queue_| and ends the Transaction. This couldn't |
|
robliao
2016/03/31 22:48:55
Nit: couldn't -> can't
fdoray
2016/04/01 16:02:51
Done.
|
| + // happen if the Transaction of step 1 was still active. |
| + // 3. Other thread calls WakeUpOneThread(). No thread is woken up because |
| + // |idle_worker_threads_stack_| is empty. |
| + // 4. This thread adds itself to |idle_worker_threads_stack_| and goes to |
| + // sleep. No thread runs the Sequence inserted in step 2. |
| + AddToIdleSchedulerWorkerThreadsStack(worker_thread); |
| + return nullptr; |
| + } |
| + |
| + transaction->Pop(); |
| + return sequence_and_sort_key.sequence; |
| +} |
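
To make the race described in the comment above concrete, here is a hypothetical, incorrect variant that releases the Transaction before registering the thread as idle. The step numbers map to the scenario in the comment; GetWorkCallbackRacy is an illustrative name, not code in this CL:

    // DO NOT DO THIS: |transaction| must outlive the push onto
    // |idle_worker_threads_stack_|.
    scoped_refptr<Sequence> ThreadPool::GetWorkCallbackRacy(
        SchedulerWorkerThread* worker_thread) {
      {
        scoped_ptr<PriorityQueue::Transaction> transaction(
            shared_priority_queue_.BeginTransaction());
        const PriorityQueue::SequenceAndSortKey sequence_and_sort_key(
            transaction->Peek());
        if (!sequence_and_sort_key.is_null()) {
          transaction->Pop();
          return sequence_and_sort_key.sequence;
        }
      }  // The Transaction ends here (step 1).
      // Another thread can now push a Sequence and call WakeUpOneThread()
      // while |idle_worker_threads_stack_| is still empty (steps 2 and 3).
      AddToIdleSchedulerWorkerThreadsStack(worker_thread);  // Too late (step 4).
      return nullptr;
    }
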
| + |
| +} // namespace internal |
| +} // namespace base |