Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(443)

Side by Side Diff: base/task_scheduler/scheduler_worker_pool_impl.cc

Issue 2801673002: Separate the create and start phases in SchedulerWorkerPoolImpl. (Closed)
Patch Set: CR-robliao-9 Created 3 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/task_scheduler/scheduler_worker_pool_impl.h" 5 #include "base/task_scheduler/scheduler_worker_pool_impl.h"
6 6
7 #include <stddef.h> 7 #include <stddef.h>
8 8
9 #include <algorithm> 9 #include <algorithm>
10 #include <utility> 10 #include <utility>
(...skipping 174 matching lines...) Expand 10 before | Expand all | Expand 10 after
185 185
186 // Number of tasks executed since the last time the 186 // Number of tasks executed since the last time the
187 // TaskScheduler.NumTasksBeforeDetach histogram was recorded. 187 // TaskScheduler.NumTasksBeforeDetach histogram was recorded.
188 size_t num_tasks_since_last_detach_ = 0; 188 size_t num_tasks_since_last_detach_ = 0;
189 189
190 const int index_; 190 const int index_;
191 191
192 DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegateImpl); 192 DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegateImpl);
193 }; 193 };
194 194
// New two-phase design: the constructor only captures configuration (name,
// priority hint, re-enqueue callback, TaskTracker, DelayedTaskManager) and
// builds the UMA histograms. Worker threads are created later in Start().
195 SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
196     const std::string& name,
197     ThreadPriority priority_hint,
198     ReEnqueueSequenceCallback re_enqueue_sequence_callback,
199     TaskTracker* task_tracker,
200     DelayedTaskManager* delayed_task_manager)
201     : name_(name),
202       priority_hint_(priority_hint),
203       re_enqueue_sequence_callback_(std::move(re_enqueue_sequence_callback)),
// The idle-stack lock is acquired-after the shared priority queue's container
// lock (predecessor passed here establishes the lock-ordering).
204       idle_workers_stack_lock_(shared_priority_queue_.container_lock()),
205       idle_workers_stack_cv_for_testing_(
206           idle_workers_stack_lock_.CreateConditionVariable()),
207       join_for_testing_returned_(WaitableEvent::ResetPolicy::MANUAL,
208                                  WaitableEvent::InitialState::NOT_SIGNALED),
209 #if DCHECK_IS_ON()
// DCHECK-only event signaled once Start() has populated |workers_|.
210       workers_created_(WaitableEvent::ResetPolicy::MANUAL,
211                        WaitableEvent::InitialState::NOT_SIGNALED),
212 #endif
213       // Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
214       detach_duration_histogram_(Histogram::FactoryTimeGet(
215           kDetachDurationHistogramPrefix + name_ + kPoolNameSuffix,
216           TimeDelta::FromMilliseconds(1),
217           TimeDelta::FromHours(1),
218           50,
219           HistogramBase::kUmaTargetedHistogramFlag)),
220       // Mimics the UMA_HISTOGRAM_COUNTS_1000 macro. When a worker runs more
221       // than 1000 tasks before detaching, there is no need to know the exact
222       // number of tasks that ran.
223       num_tasks_before_detach_histogram_(Histogram::FactoryGet(
224           kNumTasksBeforeDetachHistogramPrefix + name_ + kPoolNameSuffix,
225           1,
226           1000,
227           50,
228           HistogramBase::kUmaTargetedHistogramFlag)),
229       // Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A SchedulerWorker is
230       // expected to run between zero and a few tens of tasks between waits.
231       // When it runs more than 100 tasks, there is no need to know the exact
232       // number of tasks that ran.
233       num_tasks_between_waits_histogram_(Histogram::FactoryGet(
234           kNumTasksBetweenWaitsHistogramPrefix + name_ + kPoolNameSuffix,
235           1,
236           100,
237           50,
238           HistogramBase::kUmaTargetedHistogramFlag)),
239       task_tracker_(task_tracker),
240       delayed_task_manager_(delayed_task_manager) {
// Both injected collaborators are required to be non-null.
241   DCHECK(task_tracker_);
242   DCHECK(delayed_task_manager_);
243 }
244
// Second phase of creation: allocates all SchedulerWorkers per |params|.
// Workers whose index is below |num_wake_ups_before_start_| (wake-up requests
// received before Start()) are created ALIVE and woken up after the lock is
// released; the rest go on the idle stack (DETACHED unless covered by
// StandbyThreadPolicy::ONE).
245 void SchedulerWorkerPoolImpl::Start(const SchedulerWorkerPoolParams& params) {
246   suggested_reclaim_time_ = params.suggested_reclaim_time();
247
248   std::vector<SchedulerWorker*> workers_to_wake_up;
249
250   {
251     AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
252
253 #if DCHECK_IS_ON()
// Start() must run at most once.
254     DCHECK(!workers_created_.IsSignaled());
255 #endif
256
257     DCHECK(workers_.empty());
258     workers_.resize(params.max_threads());
259
260     // The number of workers created alive is |num_wake_ups_before_start_|, plus
261     // one if the standby thread policy is ONE (in order to start with one alive
262     // idle worker).
263     const int num_alive_workers =
264         num_wake_ups_before_start_ +
265         (params.standby_thread_policy() ==
266                  SchedulerWorkerPoolParams::StandbyThreadPolicy::ONE
267              ? 1
268              : 0);
269
270     // Create workers in reverse order of index so that the worker with the
271     // highest index is at the bottom of the idle stack.
272     for (int index = params.max_threads() - 1; index >= 0; --index) {
273       const SchedulerWorker::InitialState initial_state =
274           index < num_alive_workers ? SchedulerWorker::InitialState::ALIVE
275                                     : SchedulerWorker::InitialState::DETACHED;
276       scoped_refptr<SchedulerWorker> worker = SchedulerWorker::Create(
277           params.priority_hint(),
278           MakeUnique<SchedulerWorkerDelegateImpl>(
279               this, re_enqueue_sequence_callback_, index),
280           task_tracker_, initial_state, params.backward_compatibility());
// Stop creating workers on the first failure; already-created workers (at
// higher indices) are kept.
281       if (!worker)
282         break;
283
284       if (index < num_wake_ups_before_start_)
285         workers_to_wake_up.push_back(worker.get());
286       else
287         idle_workers_stack_.Push(worker.get());
288
289       workers_[index] = std::move(worker);
290     }
291
292 #if DCHECK_IS_ON()
293     workers_created_.Signal();
294 #endif
295
296     CHECK(!workers_.empty());
297   }
298
// Wake workers outside the lock to avoid waking them while holding it.
299   for (SchedulerWorker* worker : workers_to_wake_up)
300     worker->WakeUp();
301 }
302
// NOTE(review): side-by-side diff rendering — each line below shows the old
// (left) and new (right) columns of the same unchanged destructor.
195 SchedulerWorkerPoolImpl::~SchedulerWorkerPoolImpl() { 303 SchedulerWorkerPoolImpl::~SchedulerWorkerPoolImpl() {
196   // SchedulerWorkerPool should never be deleted in production unless its 304   // SchedulerWorkerPool should never be deleted in production unless its
197   // initialization failed. 305   // initialization failed.
198   DCHECK(join_for_testing_returned_.IsSignaled() || workers_.empty()); 306   DCHECK(join_for_testing_returned_.IsSignaled() || workers_.empty());
199 } 307 }
200 308
// Removed by this patch: the old one-shot factory that combined construction
// and worker creation (Initialize). Replaced by the public constructor plus
// the separate Start() phase above.
201 // static
202 std::unique_ptr<SchedulerWorkerPoolImpl> SchedulerWorkerPoolImpl::Create(
203     const SchedulerWorkerPoolParams& params,
204     const ReEnqueueSequenceCallback& re_enqueue_sequence_callback,
205     TaskTracker* task_tracker,
206     DelayedTaskManager* delayed_task_manager) {
207   auto worker_pool = WrapUnique(
208       new SchedulerWorkerPoolImpl(params, task_tracker, delayed_task_manager));
// Returns null when worker creation fails, signalling the caller to discard
// the pool.
209   if (worker_pool->Initialize(params, re_enqueue_sequence_callback))
210     return worker_pool;
211   return nullptr;
212 }
213
// Unchanged (old|new diff columns): returns a parallel TaskRunner bound to
// this pool with the given traits.
214 scoped_refptr<TaskRunner> SchedulerWorkerPoolImpl::CreateTaskRunnerWithTraits( 309 scoped_refptr<TaskRunner> SchedulerWorkerPoolImpl::CreateTaskRunnerWithTraits(
215     const TaskTraits& traits) { 310     const TaskTraits& traits) {
216   return make_scoped_refptr(new SchedulerParallelTaskRunner(traits, this)); 311   return make_scoped_refptr(new SchedulerParallelTaskRunner(traits, this));
217 } 312 }
218 313
// Unchanged (old|new diff columns): returns a SequencedTaskRunner bound to
// this pool with the given traits.
219 scoped_refptr<SequencedTaskRunner> 314 scoped_refptr<SequencedTaskRunner>
220 SchedulerWorkerPoolImpl::CreateSequencedTaskRunnerWithTraits( 315 SchedulerWorkerPoolImpl::CreateSequencedTaskRunnerWithTraits(
221     const TaskTraits& traits) { 316     const TaskTraits& traits) {
222   return make_scoped_refptr(new SchedulerSequencedTaskRunner(traits, this)); 317   return make_scoped_refptr(new SchedulerSequencedTaskRunner(traits, this));
223 } 318 }
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after
293 } 388 }
294 } 389 }
295 390
// Unchanged (old|new diff columns): appends this pool's reported histograms.
// Note: num_tasks_before_detach_histogram_ is intentionally not listed here.
296 void SchedulerWorkerPoolImpl::GetHistograms( 391 void SchedulerWorkerPoolImpl::GetHistograms(
297     std::vector<const HistogramBase*>* histograms) const { 392     std::vector<const HistogramBase*>* histograms) const {
298   histograms->push_back(detach_duration_histogram_); 393   histograms->push_back(detach_duration_histogram_);
299   histograms->push_back(num_tasks_between_waits_histogram_); 394   histograms->push_back(num_tasks_between_waits_histogram_);
300 } 395 }
301 396
// Patch adds a DCHECK that Start() has run, since |workers_| is only sized
// there in the new two-phase design.
302 int SchedulerWorkerPoolImpl::GetMaxConcurrentTasksDeprecated() const { 397 int SchedulerWorkerPoolImpl::GetMaxConcurrentTasksDeprecated() const {
398 #if DCHECK_IS_ON()
399   DCHECK(workers_created_.IsSignaled());
400 #endif
303   return workers_.size(); 401   return workers_.size();
304 } 402 }
305 403
// Blocks until every worker is on the idle stack. The patch adds a DCHECK
// that Start() has run before waiting on the condition variable.
306 void SchedulerWorkerPoolImpl::WaitForAllWorkersIdleForTesting() { 404 void SchedulerWorkerPoolImpl::WaitForAllWorkersIdleForTesting() {
405 #if DCHECK_IS_ON()
406   DCHECK(workers_created_.IsSignaled());
407 #endif
307   AutoSchedulerLock auto_lock(idle_workers_stack_lock_); 408   AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
308   while (idle_workers_stack_.Size() < workers_.size()) 409   while (idle_workers_stack_.Size() < workers_.size())
309     idle_workers_stack_cv_for_testing_->Wait(); 410     idle_workers_stack_cv_for_testing_->Wait();
310 } 411 }
311 412
// Joins every worker thread, then signals |join_for_testing_returned_| so the
// destructor's DCHECK passes. The patch adds a DCHECK that Start() has run.
312 void SchedulerWorkerPoolImpl::JoinForTesting() { 413 void SchedulerWorkerPoolImpl::JoinForTesting() {
414 #if DCHECK_IS_ON()
415   DCHECK(workers_created_.IsSignaled());
416 #endif
// Joining is only safe if workers cannot detach mid-join.
313   DCHECK(!CanWorkerDetachForTesting() || suggested_reclaim_time_.is_max()) 417   DCHECK(!CanWorkerDetachForTesting() || suggested_reclaim_time_.is_max())
314       << "Workers can detach during join."; 418       << "Workers can detach during join.";
315   for (const auto& worker : workers_) 419   for (const auto& worker : workers_)
316     worker->JoinForTesting(); 420     worker->JoinForTesting();
317 421
318   DCHECK(!join_for_testing_returned_.IsSignaled()); 422   DCHECK(!join_for_testing_returned_.IsSignaled());
319   join_for_testing_returned_.Signal(); 423   join_for_testing_returned_.Signal();
320 } 424 }
321 425
322 void SchedulerWorkerPoolImpl::DisallowWorkerDetachmentForTesting() { 426 void SchedulerWorkerPoolImpl::DisallowWorkerDetachmentForTesting() {
(...skipping 135 matching lines...) Expand 10 before | Expand all | Expand 10 after
458 } 562 }
459 563
// Unchanged (old|new diff columns): records the per-detach task count into
// the histogram, resets the counter, and timestamps the detach so the next
// GetWork can report detach duration.
460 void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnDetach() { 564 void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnDetach() {
461   DCHECK(!did_detach_since_last_get_work_); 565   DCHECK(!did_detach_since_last_get_work_);
462   outer_->num_tasks_before_detach_histogram_->Add(num_tasks_since_last_detach_); 566   outer_->num_tasks_before_detach_histogram_->Add(num_tasks_since_last_detach_);
463   num_tasks_since_last_detach_ = 0; 567   num_tasks_since_last_detach_ = 0;
464   did_detach_since_last_get_work_ = true; 568   did_detach_since_last_get_work_ = true;
465   last_detach_time_ = TimeTicks::Now(); 569   last_detach_time_ = TimeTicks::Now();
466 } 570 }
467 571
// NOTE(review): interleaved side-by-side diff region. Left column: the
// REMOVED old constructor (taking params) and the removed Initialize().
// Right column: the NEW WakeUpOneWorker(), which now counts wake-ups that
// arrive before Start() (|num_wake_ups_before_start_|) instead of popping
// from a not-yet-populated idle stack.
468 SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl( 572 void SchedulerWorkerPoolImpl::WakeUpOneWorker() {
469     const SchedulerWorkerPoolParams& params, 573   SchedulerWorker* worker = nullptr;
470     TaskTracker* task_tracker, 574   {
471     DelayedTaskManager* delayed_task_manager) 575     AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
472     : name_(params.name()), 576
473       suggested_reclaim_time_(params.suggested_reclaim_time()),
474       idle_workers_stack_lock_(shared_priority_queue_.container_lock()),
475       idle_workers_stack_cv_for_testing_(
476           idle_workers_stack_lock_.CreateConditionVariable()),
477       join_for_testing_returned_(WaitableEvent::ResetPolicy::MANUAL,
478                                  WaitableEvent::InitialState::NOT_SIGNALED),
479 #if DCHECK_IS_ON() 577 #if DCHECK_IS_ON()
480       workers_created_(WaitableEvent::ResetPolicy::MANUAL, 578     DCHECK_EQ(workers_.empty(), !workers_created_.IsSignaled());
481                        WaitableEvent::InitialState::NOT_SIGNALED),
482 #endif 579 #endif
483       // Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
484       detach_duration_histogram_(Histogram::FactoryTimeGet(
485           kDetachDurationHistogramPrefix + name_ + kPoolNameSuffix,
486           TimeDelta::FromMilliseconds(1),
487           TimeDelta::FromHours(1),
488           50,
489           HistogramBase::kUmaTargetedHistogramFlag)),
490       // Mimics the UMA_HISTOGRAM_COUNTS_1000 macro. When a worker runs more
491       // than 1000 tasks before detaching, there is no need to know the exact
492       // number of tasks that ran.
493       num_tasks_before_detach_histogram_(Histogram::FactoryGet(
494           kNumTasksBeforeDetachHistogramPrefix + name_ + kPoolNameSuffix,
495           1,
496           1000,
497           50,
498           HistogramBase::kUmaTargetedHistogramFlag)),
499       // Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A SchedulerWorker is
500       // expected to run between zero and a few tens of tasks between waits.
501       // When it runs more than 100 tasks, there is no need to know the exact
502       // number of tasks that ran.
503       num_tasks_between_waits_histogram_(Histogram::FactoryGet(
504           kNumTasksBetweenWaitsHistogramPrefix + name_ + kPoolNameSuffix,
505           1,
506           100,
507           50,
508           HistogramBase::kUmaTargetedHistogramFlag)),
509       task_tracker_(task_tracker),
510       delayed_task_manager_(delayed_task_manager) {
511   DCHECK(task_tracker_);
512   DCHECK(delayed_task_manager_);
513 }
514 580
// New right-column logic: before Start(), |workers_| is empty, so record the
// wake-up request instead of popping an idle worker.
515 bool SchedulerWorkerPoolImpl::Initialize( 581     if (workers_.empty())
516     const SchedulerWorkerPoolParams& params, 582       ++num_wake_ups_before_start_;
517     const ReEnqueueSequenceCallback& re_enqueue_sequence_callback) { 583     else
518   AutoSchedulerLock auto_lock(idle_workers_stack_lock_); 584       worker = idle_workers_stack_.Pop();
519
520   DCHECK(workers_.empty());
521   workers_.resize(params.max_threads());
522
523   // Create workers and push them to the idle stack in reverse order of index.
524   // This ensures that they are woken up in order of index and that the ALIVE
525   // worker is on top of the stack.
526   for (int index = params.max_threads() - 1; index >= 0; --index) {
527     const bool is_standby_lazy =
528         params.standby_thread_policy() ==
529         SchedulerWorkerPoolParams::StandbyThreadPolicy::LAZY;
530     const SchedulerWorker::InitialState initial_state =
531         (index == 0 && !is_standby_lazy)
532             ? SchedulerWorker::InitialState::ALIVE
533             : SchedulerWorker::InitialState::DETACHED;
534     scoped_refptr<SchedulerWorker> worker = SchedulerWorker::Create(
535         params.priority_hint(),
536         MakeUnique<SchedulerWorkerDelegateImpl>(
537             this, re_enqueue_sequence_callback, index),
538         task_tracker_, initial_state, params.backward_compatibility());
539     if (!worker)
540       break;
541     idle_workers_stack_.Push(worker.get());
542     workers_[index] = std::move(worker);
543   } 585   }
544 586
545 #if DCHECK_IS_ON()
546   workers_created_.Signal();
547 #endif
548
549   return !workers_.empty();
550 }
551
// Left column below: the REMOVED old WakeUpOneWorker (note |worker| was
// previously uninitialized before the lock; the new version initializes it to
// nullptr). The WakeUp() call stays outside the lock in both versions.
552 void SchedulerWorkerPoolImpl::WakeUpOneWorker() {
553   SchedulerWorker* worker;
554   {
555     AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
556     worker = idle_workers_stack_.Pop();
557   }
558   if (worker) 587   if (worker)
559     worker->WakeUp(); 588     worker->WakeUp();
560   // TODO(robliao): Honor StandbyThreadPolicy::ONE here and consider adding 589   // TODO(robliao): Honor StandbyThreadPolicy::ONE here and consider adding
561   // hysteresis to the CanDetach check. See https://crbug.com/666041. 590   // hysteresis to the CanDetach check. See https://crbug.com/666041.
562 } 591 }
563 592
564 void SchedulerWorkerPoolImpl::AddToIdleWorkersStack( 593 void SchedulerWorkerPoolImpl::AddToIdleWorkersStack(
565 SchedulerWorker* worker) { 594 SchedulerWorker* worker) {
566 AutoSchedulerLock auto_lock(idle_workers_stack_lock_); 595 AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
567 // Detachment may cause multiple attempts to add because the delegate cannot 596 // Detachment may cause multiple attempts to add because the delegate cannot
(...skipping 19 matching lines...) Expand all
587 AutoSchedulerLock auto_lock(idle_workers_stack_lock_); 616 AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
588 idle_workers_stack_.Remove(worker); 617 idle_workers_stack_.Remove(worker);
589 } 618 }
590 619
// Unchanged (old|new diff columns): detachment is allowed unless the
// test-only flag has been set via DisallowWorkerDetachmentForTesting().
591 bool SchedulerWorkerPoolImpl::CanWorkerDetachForTesting() { 620 bool SchedulerWorkerPoolImpl::CanWorkerDetachForTesting() {
592   return !worker_detachment_disallowed_.IsSet(); 621   return !worker_detachment_disallowed_.IsSet();
593 } 622 }
594 623
595 } // namespace internal 624 } // namespace internal
596 } // namespace base 625 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698