Chromium Code Reviews| Index: base/task_scheduler/scheduler_worker_pool_impl.cc |
| diff --git a/base/task_scheduler/scheduler_worker_pool_impl.cc b/base/task_scheduler/scheduler_worker_pool_impl.cc |
| index 5fb11942885571e9448d39ba55c4400ebd1e4695..68035ac3920519d8145c4fbf47998c42dd416dab 100644 |
| --- a/base/task_scheduler/scheduler_worker_pool_impl.cc |
| +++ b/base/task_scheduler/scheduler_worker_pool_impl.cc |
| @@ -192,25 +192,126 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl |
| DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegateImpl); |
| }; |
// Constructs a worker pool that is inert until Start() is called. No workers
// are created here; histograms are registered eagerly so that recording is
// valid even before Start().
SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
    const std::string& name,
    ThreadPriority priority_hint,
    ReEnqueueSequenceCallback re_enqueue_sequence_callback,
    TaskTracker* task_tracker,
    DelayedTaskManager* delayed_task_manager)
    : name_(name),
      priority_hint_(priority_hint),
      re_enqueue_sequence_callback_(std::move(re_enqueue_sequence_callback)),
      // The shared priority queue's container lock is passed to the
      // idle-stack lock's constructor — presumably declaring it a lock-order
      // predecessor so the PQ lock may be held while acquiring
      // |idle_workers_stack_lock_| (see Start()). TODO(review): confirm
      // against SchedulerLock's predecessor semantics.
      idle_workers_stack_lock_(shared_priority_queue_.container_lock()),
      idle_workers_stack_cv_for_testing_(
          idle_workers_stack_lock_.CreateConditionVariable()),
      join_for_testing_returned_(WaitableEvent::ResetPolicy::MANUAL,
                                 WaitableEvent::InitialState::NOT_SIGNALED),
#if DCHECK_IS_ON()
      workers_created_(WaitableEvent::ResetPolicy::MANUAL,
                       WaitableEvent::InitialState::NOT_SIGNALED),
#endif
      // NOTE(review): the histogram initializers below read |name_|; this is
      // only safe if |name_| is declared before the histogram members in the
      // header (mem-initializers run in declaration order) — verify.
      // Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
      detach_duration_histogram_(Histogram::FactoryTimeGet(
          kDetachDurationHistogramPrefix + name_ + kPoolNameSuffix,
          TimeDelta::FromMilliseconds(1),
          TimeDelta::FromHours(1),
          50,
          HistogramBase::kUmaTargetedHistogramFlag)),
      // Mimics the UMA_HISTOGRAM_COUNTS_1000 macro. When a worker runs more
      // than 1000 tasks before detaching, there is no need to know the exact
      // number of tasks that ran.
      num_tasks_before_detach_histogram_(Histogram::FactoryGet(
          kNumTasksBeforeDetachHistogramPrefix + name_ + kPoolNameSuffix,
          1,
          1000,
          50,
          HistogramBase::kUmaTargetedHistogramFlag)),
      // Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A SchedulerWorker is
      // expected to run between zero and a few tens of tasks between waits.
      // When it runs more than 100 tasks, there is no need to know the exact
      // number of tasks that ran.
      num_tasks_between_waits_histogram_(Histogram::FactoryGet(
          kNumTasksBetweenWaitsHistogramPrefix + name_ + kPoolNameSuffix,
          1,
          100,
          50,
          HistogramBase::kUmaTargetedHistogramFlag)),
      task_tracker_(task_tracker),
      delayed_task_manager_(delayed_task_manager) {
  // Both collaborators are required; the pool never null-checks them later.
  DCHECK(task_tracker_);
  DCHECK(delayed_task_manager_);
}
| + |
// Creates this pool's workers and wakes up as many of them as there are
// Sequences already enqueued. Must be called once, before any worker exists.
void SchedulerWorkerPoolImpl::Start(const SchedulerWorkerPoolParams& params) {
  suggested_reclaim_time_ = params.suggested_reclaim_time();

  std::vector<SchedulerWorker*> workers_to_wake_up;

  {
    // Get the number of enqueued Sequences to know how many workers to wake up
    // at the end of this method. Keep the transaction alive to prevent new
    // Sequences from being enqueued in the meantime.
    auto priority_queue_transaction = shared_priority_queue_.BeginTransaction();
    const size_t num_workers_to_wake_up = priority_queue_transaction->Size();

    // The number of workers created alive is |num_workers_to_wake_up|, plus one
    // if the standby thread policy is ONE.
    const size_t num_alive_workers =
        num_workers_to_wake_up +
        (params.standby_thread_policy() ==
                 SchedulerWorkerPoolParams::StandbyThreadPolicy::ONE
             ? 1
             : 0);

    // Acquired while the PQ transaction is live; the PQ container lock was
    // passed to |idle_workers_stack_lock_|'s constructor — presumably as a
    // lock-order predecessor making this nesting legal. TODO(review): confirm.
    AutoSchedulerLock auto_lock(idle_workers_stack_lock_);

#if DCHECK_IS_ON()
    DCHECK(!workers_created_.IsSignaled());
#endif

    DCHECK(workers_.empty());
    workers_.resize(params.max_threads());

    // Create workers in reverse order of index so that the worker with the
    // highest index is at the bottom of the idle stack.
    for (int index = params.max_threads() - 1; index >= 0; --index) {
      // Workers whose index is below |num_alive_workers| start with a live
      // thread; the rest start DETACHED (thread created lazily on wake-up).
      const SchedulerWorker::InitialState initial_state =
          static_cast<size_t>(index) < num_alive_workers
              ? SchedulerWorker::InitialState::ALIVE
              : SchedulerWorker::InitialState::DETACHED;
      // NOTE(review): this uses params.priority_hint() although the
      // constructor stored |priority_hint_|; confirm the two always agree or
      // switch to the member for consistency.
      scoped_refptr<SchedulerWorker> worker = SchedulerWorker::Create(
          params.priority_hint(),
          MakeUnique<SchedulerWorkerDelegateImpl>(
              this, re_enqueue_sequence_callback_, index),
          task_tracker_, initial_state, params.backward_compatibility());
      // NOTE(review): on Create() failure this leaves |workers_[0..index]|
      // null (the vector was pre-resized) — later loops that iterate
      // |workers_| would dereference null entries; verify callers tolerate
      // partial creation.
      if (!worker)
        break;

      // The first |num_workers_to_wake_up| workers (lowest indices) are woken
      // outside the lock; the others go on the idle stack.
      if (static_cast<size_t>(index) < num_workers_to_wake_up)
        workers_to_wake_up.push_back(worker.get());
      else
        idle_workers_stack_.Push(worker.get());

      workers_[index] = std::move(worker);
    }

#if DCHECK_IS_ON()
    workers_created_.Signal();
#endif

    // NOTE(review): |workers_| was resized to max_threads() above, so this
    // CHECK can only fail when max_threads() == 0 — it does not detect
    // worker-creation failure. Confirm that is the intent.
    CHECK(!workers_.empty());
  }

  // Wake-ups happen after both locks are released to avoid waking a worker
  // that would immediately block on |idle_workers_stack_lock_|.
  for (SchedulerWorker* worker : workers_to_wake_up)
    worker->WakeUp();
}
| + |
| SchedulerWorkerPoolImpl::~SchedulerWorkerPoolImpl() { |
| // SchedulerWorkerPool should never be deleted in production unless its |
| // initialization failed. |
| DCHECK(join_for_testing_returned_.IsSignaled() || workers_.empty()); |
| } |
| -// static |
| -std::unique_ptr<SchedulerWorkerPoolImpl> SchedulerWorkerPoolImpl::Create( |
| - const SchedulerWorkerPoolParams& params, |
| - const ReEnqueueSequenceCallback& re_enqueue_sequence_callback, |
| - TaskTracker* task_tracker, |
| - DelayedTaskManager* delayed_task_manager) { |
| - auto worker_pool = WrapUnique( |
| - new SchedulerWorkerPoolImpl(params, task_tracker, delayed_task_manager)); |
| - if (worker_pool->Initialize(params, re_enqueue_sequence_callback)) |
| - return worker_pool; |
| - return nullptr; |
| -} |
| - |
| scoped_refptr<TaskRunner> SchedulerWorkerPoolImpl::CreateTaskRunnerWithTraits( |
| const TaskTraits& traits) { |
| return make_scoped_refptr(new SchedulerParallelTaskRunner(traits, this)); |
| @@ -300,16 +401,25 @@ void SchedulerWorkerPoolImpl::GetHistograms( |
| } |
| int SchedulerWorkerPoolImpl::GetMaxConcurrentTasksDeprecated() const { |
| +#if DCHECK_IS_ON() |
| + DCHECK(workers_created_.IsSignaled()); |
| +#endif |
| return workers_.size(); |
| } |
// Test-only: blocks until every worker of the pool is on the idle stack.
void SchedulerWorkerPoolImpl::WaitForAllWorkersIdleForTesting() {
#if DCHECK_IS_ON()
  DCHECK(workers_created_.IsSignaled());
#endif
  AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
  // Standard condition-variable wait loop: re-check the predicate after each
  // wake-up since Wait() may return spuriously or after a partial change.
  // Signaling is expected to come from the code that pushes workers onto
  // |idle_workers_stack_| (not visible in this chunk).
  while (idle_workers_stack_.Size() < workers_.size())
    idle_workers_stack_cv_for_testing_->Wait();
}
| void SchedulerWorkerPoolImpl::JoinForTesting() { |
| +#if DCHECK_IS_ON() |
| + DCHECK(workers_created_.IsSignaled()); |
| +#endif |
| DCHECK(!CanWorkerDetachForTesting() || suggested_reclaim_time_.is_max()) |
| << "Workers can detach during join."; |
| for (const auto& worker : workers_) |
| @@ -324,6 +434,9 @@ void SchedulerWorkerPoolImpl::DisallowWorkerDetachmentForTesting() { |
| } |
| size_t SchedulerWorkerPoolImpl::NumberOfAliveWorkersForTesting() { |
| +#if DCHECK_IS_ON() |
| + DCHECK(workers_created_.IsSignaled()); |
| +#endif |
| size_t num_alive_workers = 0; |
| for (const auto& worker : workers_) { |
| if (worker->ThreadAliveForTesting()) |
| @@ -465,90 +578,6 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnDetach() { |
| last_detach_time_ = TimeTicks::Now(); |
| } |
| -SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl( |
| - const SchedulerWorkerPoolParams& params, |
| - TaskTracker* task_tracker, |
| - DelayedTaskManager* delayed_task_manager) |
| - : name_(params.name()), |
| - suggested_reclaim_time_(params.suggested_reclaim_time()), |
| - idle_workers_stack_lock_(shared_priority_queue_.container_lock()), |
| - idle_workers_stack_cv_for_testing_( |
| - idle_workers_stack_lock_.CreateConditionVariable()), |
| - join_for_testing_returned_(WaitableEvent::ResetPolicy::MANUAL, |
| - WaitableEvent::InitialState::NOT_SIGNALED), |
| -#if DCHECK_IS_ON() |
| - workers_created_(WaitableEvent::ResetPolicy::MANUAL, |
| - WaitableEvent::InitialState::NOT_SIGNALED), |
| -#endif |
| - // Mimics the UMA_HISTOGRAM_LONG_TIMES macro. |
| - detach_duration_histogram_(Histogram::FactoryTimeGet( |
| - kDetachDurationHistogramPrefix + name_ + kPoolNameSuffix, |
| - TimeDelta::FromMilliseconds(1), |
| - TimeDelta::FromHours(1), |
| - 50, |
| - HistogramBase::kUmaTargetedHistogramFlag)), |
| - // Mimics the UMA_HISTOGRAM_COUNTS_1000 macro. When a worker runs more |
| - // than 1000 tasks before detaching, there is no need to know the exact |
| - // number of tasks that ran. |
| - num_tasks_before_detach_histogram_(Histogram::FactoryGet( |
| - kNumTasksBeforeDetachHistogramPrefix + name_ + kPoolNameSuffix, |
| - 1, |
| - 1000, |
| - 50, |
| - HistogramBase::kUmaTargetedHistogramFlag)), |
| - // Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A SchedulerWorker is |
| - // expected to run between zero and a few tens of tasks between waits. |
| - // When it runs more than 100 tasks, there is no need to know the exact |
| - // number of tasks that ran. |
| - num_tasks_between_waits_histogram_(Histogram::FactoryGet( |
| - kNumTasksBetweenWaitsHistogramPrefix + name_ + kPoolNameSuffix, |
| - 1, |
| - 100, |
| - 50, |
| - HistogramBase::kUmaTargetedHistogramFlag)), |
| - task_tracker_(task_tracker), |
| - delayed_task_manager_(delayed_task_manager) { |
| - DCHECK(task_tracker_); |
| - DCHECK(delayed_task_manager_); |
| -} |
| - |
| -bool SchedulerWorkerPoolImpl::Initialize( |
| - const SchedulerWorkerPoolParams& params, |
| - const ReEnqueueSequenceCallback& re_enqueue_sequence_callback) { |
| - AutoSchedulerLock auto_lock(idle_workers_stack_lock_); |
| - |
| - DCHECK(workers_.empty()); |
| - workers_.resize(params.max_threads()); |
| - |
| - // Create workers and push them to the idle stack in reverse order of index. |
| - // This ensures that they are woken up in order of index and that the ALIVE |
| - // worker is on top of the stack. |
| - for (int index = params.max_threads() - 1; index >= 0; --index) { |
| - const bool is_standby_lazy = |
| - params.standby_thread_policy() == |
| - SchedulerWorkerPoolParams::StandbyThreadPolicy::LAZY; |
| - const SchedulerWorker::InitialState initial_state = |
| - (index == 0 && !is_standby_lazy) |
| - ? SchedulerWorker::InitialState::ALIVE |
| - : SchedulerWorker::InitialState::DETACHED; |
| - scoped_refptr<SchedulerWorker> worker = SchedulerWorker::Create( |
| - params.priority_hint(), |
| - MakeUnique<SchedulerWorkerDelegateImpl>( |
| - this, re_enqueue_sequence_callback, index), |
| - task_tracker_, initial_state, params.backward_compatibility()); |
| - if (!worker) |
| - break; |
| - idle_workers_stack_.Push(worker.get()); |
| - workers_[index] = std::move(worker); |
| - } |
| - |
| -#if DCHECK_IS_ON() |
| - workers_created_.Signal(); |
| -#endif |
| - |
| - return !workers_.empty(); |
| -} |
| - |
| void SchedulerWorkerPoolImpl::WakeUpOneWorker() { |
| SchedulerWorker* worker; |
| { |