Index: base/task_scheduler/scheduler_worker_pool_impl.cc
diff --git a/base/task_scheduler/scheduler_worker_pool_impl.cc b/base/task_scheduler/scheduler_worker_pool_impl.cc
index 0515154e8ec74967c3b79ab529b80abcbfe0f475..d45495413ff95194b3fd90e2fb803e1e825896fd 100644
--- a/base/task_scheduler/scheduler_worker_pool_impl.cc
+++ b/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -206,10 +206,6 @@ SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
           idle_workers_stack_lock_.CreateConditionVariable()),
       join_for_testing_returned_(WaitableEvent::ResetPolicy::MANUAL,
                                  WaitableEvent::InitialState::NOT_SIGNALED),
-#if DCHECK_IS_ON()
-      workers_created_(WaitableEvent::ResetPolicy::MANUAL,
-                       WaitableEvent::InitialState::NOT_SIGNALED),
-#endif
       // Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
       detach_duration_histogram_(Histogram::FactoryTimeGet(
           kDetachDurationHistogramPrefix + name_ + kPoolNameSuffix,
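Only the initializer disappears in the hunk above; the Set()/IsSet() calls in the hunks below suggest that |workers_created_| changed from a DCHECK-only WaitableEvent to a flag-style primitive (presumably base::AtomicFlag), which needs no constructor arguments. A minimal sketch of such a set-once flag, for illustration only and not the Chromium implementation:

// Illustrative set-once flag with the same Set()/IsSet() shape as the calls
// in the hunks below. Not Chromium's base::AtomicFlag, just the general idea.
#include <atomic>

class SetOnceFlag {
 public:
  void Set() { flag_.store(true, std::memory_order_release); }
  bool IsSet() const { return flag_.load(std::memory_order_acquire); }

 private:
  std::atomic<bool> flag_{false};
};

Because the flag only has to record "workers_ has been filled in" and nothing blocks on it anymore, release/acquire ordering is sufficient.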
@@ -245,13 +241,11 @@ SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
 void SchedulerWorkerPoolImpl::Start(const SchedulerWorkerPoolParams& params) {
   suggested_reclaim_time_ = params.suggested_reclaim_time();
 
-  std::vector<SchedulerWorker*> workers_to_wake_up;
-
   {
     AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
 
 #if DCHECK_IS_ON()
-    DCHECK(!workers_created_.IsSignaled());
+    DCHECK(!workers_created_.IsSet());
 #endif
 
     DCHECK(workers_.empty());
@@ -270,34 +264,34 @@ void SchedulerWorkerPoolImpl::Start(const SchedulerWorkerPoolParams& params) {
     // Create workers in reverse order of index so that the worker with the
     // highest index is at the bottom of the idle stack.
     for (int index = params.max_threads() - 1; index >= 0; --index) {
-      const SchedulerWorker::InitialState initial_state =
-          index < num_alive_workers ? SchedulerWorker::InitialState::ALIVE
-                                    : SchedulerWorker::InitialState::DETACHED;
-      scoped_refptr<SchedulerWorker> worker = SchedulerWorker::Create(
+      workers_[index] = make_scoped_refptr(new SchedulerWorker(
           params.priority_hint(),
           MakeUnique<SchedulerWorkerDelegateImpl>(
               this, re_enqueue_sequence_callback_, index),
-          task_tracker_, initial_state, params.backward_compatibility());
-      if (!worker)
-        break;
-
-      if (index < num_wake_ups_before_start_)
-        workers_to_wake_up.push_back(worker.get());
-      else
-        idle_workers_stack_.Push(worker.get());
+          task_tracker_, params.backward_compatibility(),
+          index < num_alive_workers ? SchedulerWorker::InitialState::ALIVE
+                                    : SchedulerWorker::InitialState::DETACHED));
 
-      workers_[index] = std::move(worker);
+      // Put workers that won't be woken up at the end of this method on the
+      // idle stack.
+      if (index >= num_wake_ups_before_start_)
+        idle_workers_stack_.Push(workers_[index].get());
     }
 
 #if DCHECK_IS_ON()
-    workers_created_.Signal();
+    workers_created_.Set();
 #endif
-
-    CHECK(!workers_.empty());
   }
 
-  for (SchedulerWorker* worker : workers_to_wake_up)
-    worker->WakeUp();
+  // Start all workers. CHECK that the first worker can be started (assume that
+  // failure means that threads can't be created on this machine). Wake up one
+  // worker for each wake up that occurred before Start().
+  for (size_t index = 0; index < workers_.size(); ++index) {
+    const bool start_success = workers_[index]->Start();
+    CHECK(start_success || index > 0);
+    if (static_cast<int>(index) < num_wake_ups_before_start_)
+      workers_[index]->WakeUp();
+  }
 }
 
 SchedulerWorkerPoolImpl::~SchedulerWorkerPoolImpl() {
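The rewritten Start() above splits bring-up into two phases: every SchedulerWorker object is created and the idle stack populated while holding the lock, and only afterwards are the underlying threads started, with a CHECK that at least the first thread can be created and a WakeUp() for each wake-up request that arrived before Start(). The following self-contained sketch mirrors that create-then-start pattern with invented names (Worker, kWakeUpsBeforeStart); it is not the Chromium code.

#include <condition_variable>
#include <cstdio>
#include <memory>
#include <mutex>
#include <system_error>
#include <thread>
#include <vector>

class Worker {
 public:
  // Phase 2: create the backing thread. Returns false if that fails.
  bool Start() {
    try {
      thread_ = std::thread([this] { Run(); });
    } catch (const std::system_error&) {
      return false;
    }
    return true;
  }

  void WakeUp() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      woken_ = true;
    }
    cv_.notify_one();
  }

  void Join() {
    WakeUp();
    if (thread_.joinable())
      thread_.join();
  }

 private:
  void Run() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return woken_; });
    std::printf("worker running\n");
  }

  std::mutex mutex_;
  std::condition_variable cv_;
  bool woken_ = false;
  std::thread thread_;
};

int main() {
  constexpr int kMaxThreads = 4;
  constexpr int kWakeUpsBeforeStart = 1;  // Wake-ups received before Start().

  // Phase 1: build the worker array up front, before any thread exists, so
  // other code can traverse it without racing against initialization.
  std::vector<std::unique_ptr<Worker>> workers(kMaxThreads);
  for (int index = kMaxThreads - 1; index >= 0; --index)
    workers[index] = std::make_unique<Worker>();

  // Phase 2: create the threads. The first failure is fatal (assume the
  // machine cannot create threads at all); later failures only shrink the
  // pool.
  for (size_t index = 0; index < workers.size(); ++index) {
    const bool started = workers[index]->Start();
    if (!started && index == 0)
      return 1;
    if (started && static_cast<int>(index) < kWakeUpsBeforeStart)
      workers[index]->WakeUp();
  }

  for (auto& worker : workers)
    worker->Join();
  return 0;
}

The point of the split is that workers_ reaches its final size before any worker thread exists, so worker threads never observe a partially built array.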
@@ -398,14 +392,14 @@ void SchedulerWorkerPoolImpl::GetHistograms(
 
 int SchedulerWorkerPoolImpl::GetMaxConcurrentTasksDeprecated() const {
 #if DCHECK_IS_ON()
-  DCHECK(workers_created_.IsSignaled());
+  DCHECK(workers_created_.IsSet());
 #endif
   return workers_.size();
 }
 
 void SchedulerWorkerPoolImpl::WaitForAllWorkersIdleForTesting() {
 #if DCHECK_IS_ON()
-  DCHECK(workers_created_.IsSignaled());
+  DCHECK(workers_created_.IsSet());
 #endif
   AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
   while (idle_workers_stack_.Size() < workers_.size())
@@ -414,7 +408,7 @@ void SchedulerWorkerPoolImpl::WaitForAllWorkersIdleForTesting() {
 
 void SchedulerWorkerPoolImpl::JoinForTesting() {
 #if DCHECK_IS_ON()
-  DCHECK(workers_created_.IsSignaled());
+  DCHECK(workers_created_.IsSet());
 #endif
   DCHECK(!CanWorkerDetachForTesting() || suggested_reclaim_time_.is_max())
       << "Workers can detach during join.";
@@ -453,12 +447,9 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
 void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry(
     SchedulerWorker* worker) {
 #if DCHECK_IS_ON()
-  // Wait for |outer_->workers_created_| to avoid traversing
-  // |outer_->workers_| while it is being filled by Initialize().
-  outer_->workers_created_.Wait();
+  DCHECK(outer_->workers_created_.IsSet());
   DCHECK(ContainsWorker(outer_->workers_, worker));
 #endif
-
   DCHECK_EQ(num_tasks_since_last_wait_, 0U);
 
   if (!last_detach_time_.is_null()) {
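The OnMainEntry() change above replaces a blocking Wait() with an assertion. That works because the new Start() fully populates |workers_| and sets |workers_created_| before any worker thread is started, so a worker thread can only reach OnMainEntry() once the flag is already set; the DCHECK documents an invariant rather than synchronizing anything. A minimal, self-contained illustration of that ordering guarantee (not Chromium code):

#include <atomic>
#include <cassert>
#include <thread>

int main() {
  std::atomic<bool> workers_created{false};

  // Everything the parent does before constructing the thread, including this
  // store, happens-before the first statement the new thread executes.
  workers_created.store(true, std::memory_order_release);

  std::thread worker([&] {
    // Stand-in for OnMainEntry(): the flag is guaranteed to be set already,
    // so asserting is enough and there is nothing to wait for.
    assert(workers_created.load(std::memory_order_acquire));
  });
  worker.join();
  return 0;
}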
@@ -577,7 +568,7 @@ void SchedulerWorkerPoolImpl::WakeUpOneWorker() {
   AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
 
 #if DCHECK_IS_ON()
-  DCHECK_EQ(workers_.empty(), !workers_created_.IsSignaled());
+  DCHECK_EQ(workers_.empty(), !workers_created_.IsSet());
 #endif
 
   if (workers_.empty())