Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1092)

Unified Diff: base/task_scheduler/scheduler_thread_pool.cc

Issue 1708773002: TaskScheduler [7] SchedulerThreadPool (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@s_5_worker_thread
Patch Set: merge SchedulerWorkerThreadDelegate Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: base/task_scheduler/scheduler_thread_pool.cc
diff --git a/base/task_scheduler/scheduler_thread_pool.cc b/base/task_scheduler/scheduler_thread_pool.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4e7d7c00079d04ad8fc9a418deb0af30e85db377
--- /dev/null
+++ b/base/task_scheduler/scheduler_thread_pool.cc
@@ -0,0 +1,266 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_thread_pool.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/task_scheduler/task_tracker.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
// Shared PriorityQueue of a thread's SchedulerThreadPool. Not set for threads
// that don't belong to a SchedulerThreadPool. Intentionally leaky
// (LazyInstance<...>::Leaky): the pointer is never destroyed at shutdown.
LazyInstance<ThreadLocalPointer<const PriorityQueue>>::Leaky
    tls_current_shared_priority_queue = LAZY_INSTANCE_INITIALIZER;
+
+// A task runner that runs tasks with the PARALLEL ExecutionMode.
+class SchedulerParallelTaskRunner : public TaskRunner {
+ public:
+ SchedulerParallelTaskRunner(const TaskTraits& traits,
+ PriorityQueue* priority_queue,
+ TaskTracker* task_tracker)
+ : traits_(traits),
+ priority_queue_(priority_queue),
+ task_tracker_(task_tracker) {}
+
+ // TaskRunner:
+ bool PostDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& closure,
+ TimeDelta delay) override {
+ // TODO(fdoray): Support delayed tasks.
+ DCHECK(delay.is_zero());
+
+ // Create a new Sequence to allow parallel execution of Tasks posted through
gab 2016/04/07 20:32:47 s/new Sequence/single-task Sequence/ ("create" im
fdoray 2016/04/08 14:53:02 Done.
+ // this TaskRunner.
+ scoped_refptr<Sequence> sequence(new Sequence);
gab 2016/04/07 20:32:47 I'd also be happy to inline this, something like:
fdoray 2016/04/08 14:53:02 Done.
+
+ PostTaskHelper(WrapUnique(new Task(from_here, closure, traits_)),
+ std::move(sequence), priority_queue_, task_tracker_);
+
+ return true;
+ }
+
+ bool RunsTasksOnCurrentThread() const override {
+ return tls_current_shared_priority_queue.Get().Get() == priority_queue_;
+ }
+
+ private:
+ ~SchedulerParallelTaskRunner() override = default;
+
+ const TaskTraits traits_;
+ PriorityQueue* const priority_queue_;
+ TaskTracker* const task_tracker_;
+
+ DISALLOW_COPY_AND_ASSIGN(SchedulerParallelTaskRunner);
+};
+
+} // namespace
+
+std::unique_ptr<SchedulerThreadPool> SchedulerThreadPool::CreateThreadPool(
+ ThreadPriority thread_priority,
+ size_t max_threads,
+ const RanTaskFromSequenceCallback& ran_task_from_sequence_callback,
+ TaskTracker* task_tracker) {
+ std::unique_ptr<SchedulerThreadPool> thread_pool(
+ new SchedulerThreadPool(ran_task_from_sequence_callback, task_tracker));
+ thread_pool->Initialize(thread_priority, max_threads);
+ if (thread_pool->worker_threads_.empty())
+ return nullptr;
+ return thread_pool;
+}
+
+SchedulerThreadPool::~SchedulerThreadPool() {
+ AutoSchedulerLock auto_lock(join_for_testing_returned_lock_);
+ DCHECK(join_for_testing_returned_ || worker_threads_.empty());
gab 2016/04/07 20:32:47 Wrap these two lines in #if DCHECK_IS_ON() to avoi
fdoray 2016/04/08 14:53:02 Done.
+}
+
+scoped_refptr<TaskRunner> SchedulerThreadPool::CreateTaskRunnerWithTraits(
+ const TaskTraits& traits,
+ ExecutionMode execution_mode) {
+ switch (execution_mode) {
+ case ExecutionMode::PARALLEL:
+ return make_scoped_refptr(new SchedulerParallelTaskRunner(
+ traits, &shared_priority_queue_, task_tracker_));
+
+ case ExecutionMode::SEQUENCED:
+ case ExecutionMode::SINGLE_THREADED:
+ // TODO(fdoray): Support SEQUENCED and SINGLE_THREADED TaskRunners.
+ NOTREACHED();
+ return nullptr;
+ }
+
+ NOTREACHED();
+ return nullptr;
+}
+
+void SchedulerThreadPool::InsertSequenceAfterTaskRan(
+ scoped_refptr<Sequence> sequence,
+ const SequenceSortKey& sequence_sort_key) {
+ auto sequence_and_sort_key = WrapUnique(new PriorityQueue::SequenceAndSortKey(
+ std::move(sequence), sequence_sort_key));
+ auto transaction = shared_priority_queue_.BeginTransaction();
+
+ // The thread calling this method just ran a Task from |sequence| and will
+ // soon try to get another Sequence from which to run a Task. If the thread
+ // belongs to this pool, it will get that Sequence from
+ // |shared_priority_queue_|. When that's the case, there is no need to wake up
+ // another thread after |sequence| is inserted in |shared_priority_queue_|. If
+ // we did wake up another thread, we would waste resources by having more
+ // threads trying to get a Sequence from |shared_priority_queue_| than the
+ // number of Sequences in it.
+ if (tls_current_shared_priority_queue.Get().Get() == &shared_priority_queue_)
+ transaction->PushNoWakeUp(std::move(sequence_and_sort_key));
+ else
+ transaction->Push(std::move(sequence_and_sort_key));
+}
+
+void SchedulerThreadPool::WaitForAllWorkerThreadsIdleForTesting() {
+ AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_);
+ while (idle_worker_threads_stack_.size() < worker_threads_.size())
+ idle_worker_threads_stack_cv_->Wait();
+}
+
+void SchedulerThreadPool::JoinForTesting() {
+ for (const auto& worker_thread : worker_threads_)
+ worker_thread->JoinForTesting();
+
+ AutoSchedulerLock auto_lock(join_for_testing_returned_lock_);
gab 2016/04/07 20:32:47 We could get away with a MemoryBarrier instead of
fdoray 2016/04/08 14:53:02 I wanted to use a MemoryBarrier but https://codere
+ DCHECK(!join_for_testing_returned_);
+ join_for_testing_returned_ = true;
+}
+
// Builds a SchedulerThreadPool without creating its worker threads;
// Initialize() must be called before the pool is used.
SchedulerThreadPool::SchedulerThreadPool(
    const RanTaskFromSequenceCallback& ran_task_from_sequence_callback,
    TaskTracker* task_tracker)
    // Pushing a Sequence into |shared_priority_queue_| (via Push(), not
    // PushNoWakeUp()) wakes up one idle worker thread of this pool.
    : shared_priority_queue_(
          Bind(&SchedulerThreadPool::WakeUpOneThread, Unretained(this))),
      // Constructed from the queue's container lock — presumably a
      // lock-ordering predecessor; confirm against SchedulerLock's contract.
      idle_worker_threads_stack_lock_(shared_priority_queue_.container_lock()),
      idle_worker_threads_stack_cv_(
          idle_worker_threads_stack_lock_.CreateConditionVariable()),
      ran_task_from_sequence_callback_(ran_task_from_sequence_callback),
      task_tracker_(task_tracker) {
  DCHECK(task_tracker_);
}
+
+void SchedulerThreadPool::Initialize(ThreadPriority thread_priority,
+ size_t max_threads) {
+ DCHECK(worker_threads_.empty());
+
+ AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_);
+
+ for (size_t i = 0; i < max_threads; ++i) {
+ std::unique_ptr<SchedulerWorkerThread> worker_thread =
+ SchedulerWorkerThread::CreateSchedulerWorkerThread(thread_priority,
+ this, task_tracker_);
+ if (!worker_thread)
+ break;
+ idle_worker_threads_stack_.push(worker_thread.get());
+ worker_threads_.push_back(std::move(worker_thread));
+ }
+}
+
+void SchedulerThreadPool::WakeUpOneThread() {
+ AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_);
+
+ if (idle_worker_threads_stack_.empty())
+ return;
+
+ SchedulerWorkerThread* worker_thread = idle_worker_threads_stack_.top();
+ idle_worker_threads_stack_.pop();
+ worker_thread->WakeUp();
gab 2016/04/07 20:32:47 Could technically release the lock before invoking
fdoray 2016/04/08 14:53:02 Done.
+}
+
+void SchedulerThreadPool::AddToIdleWorkerThreadsStack(
+ SchedulerWorkerThread* worker_thread) {
+ AutoSchedulerLock auto_lock(idle_worker_threads_stack_lock_);
+ idle_worker_threads_stack_.push(worker_thread);
+ DCHECK_LE(idle_worker_threads_stack_.size(), worker_threads_.size());
+
+ if (idle_worker_threads_stack_.size() == worker_threads_.size())
+ idle_worker_threads_stack_cv_->Broadcast();
gab 2016/04/07 20:32:47 Would a manual-reset WaitableEvent be better here?
fdoray 2016/04/08 14:53:02 The WaitableEvent would be reset in WakeUpOneThrea
gab 2016/04/08 17:56:00 But the Broadcast's call can racily be lost, no? i
fdoray 2016/04/08 19:00:05 WaitForAllWorkerThreadsIdleForTesting will return
+}
+
+void SchedulerThreadPool::OnMainEntry() {
+ DCHECK(!tls_current_shared_priority_queue.Get().Get());
+ tls_current_shared_priority_queue.Get().Set(&shared_priority_queue_);
+}
+
void SchedulerThreadPool::OnMainExit() {
  // Clear the TLS entry set in OnMainEntry().
  // NOTE(review): a reviewer suggested this reset may not be necessary on
  // thread exit — confirm before removing it or relying on the TLS value
  // after this point.
  DCHECK(tls_current_shared_priority_queue.Get().Get());
  tls_current_shared_priority_queue.Get().Set(nullptr);
}
+
// Returns the next Sequence that |worker_thread| should run a Task from, or
// null if the shared queue is empty (in which case |worker_thread| is added
// to the idle stack before returning).
scoped_refptr<Sequence> SchedulerThreadPool::GetWork(
    SchedulerWorkerThread* worker_thread) {
  std::unique_ptr<PriorityQueue::Transaction> transaction(
      shared_priority_queue_.BeginTransaction());
  const PriorityQueue::SequenceAndSortKey sequence_and_sort_key(
      transaction->Peek());

  if (sequence_and_sort_key.is_null()) {
    // |transaction| is kept alive while |worker_thread| is added to
    // |idle_worker_threads_stack_| to avoid this race:
    // 1. This thread creates a Transaction, finds |shared_priority_queue_|
    //    empty and ends the Transaction.
    // 2. Other thread creates a Transaction, inserts a Sequence into
    //    |shared_priority_queue_| and ends the Transaction. This can't happen
    //    if the Transaction of step 1 is still active because there can only
    //    be one active Transaction per PriorityQueue at a time.
    // 3. Other thread calls WakeUpOneThread(). No thread is woken up because
    //    |idle_worker_threads_stack_| is empty.
    // 4. This thread adds itself to |idle_worker_threads_stack_| and goes to
    //    sleep. No thread runs the Sequence inserted in step 2.
    AddToIdleWorkerThreadsStack(worker_thread);
    return nullptr;
  }

  // Pop the Sequence that Peek() returned now that we are committed to
  // running a Task from it.
  transaction->Pop();
  return sequence_and_sort_key.sequence;
}
+
// Forwards |sequence| to the callback provided at construction time after a
// Task from it has been run.
void SchedulerThreadPool::RanTaskFromSequence(
    scoped_refptr<Sequence> sequence) {
  ran_task_from_sequence_callback_.Run(std::move(sequence));
}
+
// Posts |task| as part of |sequence| to |priority_queue|, after checking with
// |task_tracker|. Returns true iff the post was accepted; on false, the task
// is destroyed without running.
bool PostTaskHelper(std::unique_ptr<Task> task,
                    scoped_refptr<Sequence> sequence,
                    PriorityQueue* priority_queue,
                    TaskTracker* task_tracker) {
  DCHECK(task);
  DCHECK(sequence);
  DCHECK(priority_queue);
  DCHECK(task_tracker);

  if (!task_tracker->WillPostTask(task.get()))
    return false;

  const bool sequence_was_empty = sequence->PushTask(std::move(task));
  if (sequence_was_empty) {
    // Insert |sequence| in |priority_queue| if it was empty before |task| was
    // inserted into it. When that's not the case, one of these must be true:
    // - |sequence| is already in a PriorityQueue, or,
    // - A worker thread is running a Task from |sequence|. It will insert
    //   |sequence| in a PriorityQueue once it's done running the Task.
    //
    // |sequence_sort_key| is deliberately computed on its own line, before
    // |sequence| is moved into the SequenceAndSortKey below — do not inline
    // the GetSortKey() call into the Push() expression.
    const SequenceSortKey sequence_sort_key = sequence->GetSortKey();
    priority_queue->BeginTransaction()->Push(
        WrapUnique(new PriorityQueue::SequenceAndSortKey(std::move(sequence),
                                                         sequence_sort_key)));
  }

  return true;
}
+
+} // namespace internal
+} // namespace base

Powered by Google App Engine
This is Rietveld 408576698