| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/task_scheduler/scheduler_worker_pool_impl.h" | 5 #include "base/task_scheduler/scheduler_worker_pool_impl.h" |
| 6 | 6 |
| 7 #include <stddef.h> | 7 #include <stddef.h> |
| 8 | 8 |
| 9 #include <algorithm> | 9 #include <algorithm> |
| 10 #include <utility> | 10 #include <utility> |
| (...skipping 16 matching lines...) |
| 27 #include "base/time/time.h" | 27 #include "base/time/time.h" |
| 28 | 28 |
| 29 namespace base { | 29 namespace base { |
| 30 namespace internal { | 30 namespace internal { |
| 31 | 31 |
| 32 namespace { | 32 namespace { |
| 33 | 33 |
| 34 constexpr char kPoolNameSuffix[] = "Pool"; | 34 constexpr char kPoolNameSuffix[] = "Pool"; |
| 35 constexpr char kDetachDurationHistogramPrefix[] = | 35 constexpr char kDetachDurationHistogramPrefix[] = |
| 36 "TaskScheduler.DetachDuration."; | 36 "TaskScheduler.DetachDuration."; |
| 37 constexpr char kNumTasksBetweenWaitsHistogramPrefix[] = |
| 38 "TaskScheduler.NumTasksBetweenWaits."; |
| 37 constexpr char kTaskLatencyHistogramPrefix[] = "TaskScheduler.TaskLatency."; | 39 constexpr char kTaskLatencyHistogramPrefix[] = "TaskScheduler.TaskLatency."; |
| 38 | 40 |
| 39 // SchedulerWorker that owns the current thread, if any. | 41 // SchedulerWorker that owns the current thread, if any. |
| 40 LazyInstance<ThreadLocalPointer<const SchedulerWorker>>::Leaky | 42 LazyInstance<ThreadLocalPointer<const SchedulerWorker>>::Leaky |
| 41 tls_current_worker = LAZY_INSTANCE_INITIALIZER; | 43 tls_current_worker = LAZY_INSTANCE_INITIALIZER; |
| 42 | 44 |
| 43 // SchedulerWorkerPool that owns the current thread, if any. | 45 // SchedulerWorkerPool that owns the current thread, if any. |
| 44 LazyInstance<ThreadLocalPointer<const SchedulerWorkerPool>>::Leaky | 46 LazyInstance<ThreadLocalPointer<const SchedulerWorkerPool>>::Leaky |
| 45 tls_current_worker_pool = LAZY_INSTANCE_INITIALIZER; | 47 tls_current_worker_pool = LAZY_INSTANCE_INITIALIZER; |
| 46 | 48 |
| (...skipping 212 matching lines...) |
| 259 // Single-threaded PriorityQueue for the worker. | 261 // Single-threaded PriorityQueue for the worker. |
| 260 PriorityQueue single_threaded_priority_queue_; | 262 PriorityQueue single_threaded_priority_queue_; |
| 261 | 263 |
| 262 // True if the last Sequence returned by GetWork() was extracted from | 264 // True if the last Sequence returned by GetWork() was extracted from |
| 263 // |single_threaded_priority_queue_|. | 265 // |single_threaded_priority_queue_|. |
| 264 bool last_sequence_is_single_threaded_ = false; | 266 bool last_sequence_is_single_threaded_ = false; |
| 265 | 267 |
| 266 // Time when GetWork() first returned nullptr. | 268 // Time when GetWork() first returned nullptr. |
| 267 TimeTicks idle_start_time_; | 269 TimeTicks idle_start_time_; |
| 268 | 270 |
| 271 // Indicates whether the last call to GetWork() returned nullptr. |
| 272 bool last_get_work_returned_nullptr_ = false; |
| 273 |
| 274 // Indicates whether the SchedulerWorker was detached since the last call to |
| 275 // GetWork(). |
| 276 bool did_detach_since_last_get_work_ = false; |
| 277 |
| 278 // Number of tasks executed since the last time the |
| 279 // TaskScheduler.NumTasksBetweenWaits histogram was recorded. |
| 280 size_t num_tasks_since_last_wait_ = 0; |
| 281 |
| 269 subtle::Atomic32 num_single_threaded_runners_ = 0; | 282 subtle::Atomic32 num_single_threaded_runners_ = 0; |
| 270 | 283 |
| 271 const int index_; | 284 const int index_; |
| 272 | 285 |
| 273 DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegateImpl); | 286 DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDelegateImpl); |
| 274 }; | 287 }; |
| 275 | 288 |
| 276 SchedulerWorkerPoolImpl::~SchedulerWorkerPoolImpl() { | 289 SchedulerWorkerPoolImpl::~SchedulerWorkerPoolImpl() { |
| 277 // SchedulerWorkerPool should never be deleted in production unless its | 290 // SchedulerWorkerPool should never be deleted in production unless its |
| 278 // initialization failed. | 291 // initialization failed. |
| (...skipping 184 matching lines...) |
| 463 void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry( | 476 void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry( |
| 464 SchedulerWorker* worker, | 477 SchedulerWorker* worker, |
| 465 const TimeDelta& detach_duration) { | 478 const TimeDelta& detach_duration) { |
| 466 #if DCHECK_IS_ON() | 479 #if DCHECK_IS_ON() |
| 467 // Wait for |outer_->workers_created_| to avoid traversing | 480 // Wait for |outer_->workers_created_| to avoid traversing |
| 468 // |outer_->workers_| while it is being filled by Initialize(). | 481 // |outer_->workers_| while it is being filled by Initialize(). |
| 469 outer_->workers_created_.Wait(); | 482 outer_->workers_created_.Wait(); |
| 470 DCHECK(ContainsWorker(outer_->workers_, worker)); | 483 DCHECK(ContainsWorker(outer_->workers_, worker)); |
| 471 #endif | 484 #endif |
| 472 | 485 |
| 473 if (!detach_duration.is_max()) | 486 DCHECK_EQ(num_tasks_since_last_wait_, 0U); |
| 487 |
| 488 if (!detach_duration.is_max()) { |
| 474 outer_->detach_duration_histogram_->AddTime(detach_duration); | 489 outer_->detach_duration_histogram_->AddTime(detach_duration); |
| 490 did_detach_since_last_get_work_ = true; |
| 491 } |
| 475 | 492 |
| 476 PlatformThread::SetName( | 493 PlatformThread::SetName( |
| 477 StringPrintf("TaskScheduler%sWorker%d", outer_->name_.c_str(), index_)); | 494 StringPrintf("TaskScheduler%sWorker%d", outer_->name_.c_str(), index_)); |
| 478 | 495 |
| 479 DCHECK(!tls_current_worker.Get().Get()); | 496 DCHECK(!tls_current_worker.Get().Get()); |
| 480 DCHECK(!tls_current_worker_pool.Get().Get()); | 497 DCHECK(!tls_current_worker_pool.Get().Get()); |
| 481 tls_current_worker.Get().Set(worker); | 498 tls_current_worker.Get().Set(worker); |
| 482 tls_current_worker_pool.Get().Set(outer_); | 499 tls_current_worker_pool.Get().Set(outer_); |
| 483 | 500 |
| 484 // New threads haven't run GetWork() yet, so reset the idle_start_time_. | 501 // New threads haven't run GetWork() yet, so reset the |idle_start_time_|. |
| 485 idle_start_time_ = TimeTicks(); | 502 idle_start_time_ = TimeTicks(); |
| 486 | 503 |
| 487 ThreadRestrictions::SetIOAllowed( | 504 ThreadRestrictions::SetIOAllowed( |
| 488 outer_->io_restriction_ == | 505 outer_->io_restriction_ == |
| 489 SchedulerWorkerPoolParams::IORestriction::ALLOWED); | 506 SchedulerWorkerPoolParams::IORestriction::ALLOWED); |
| 490 } | 507 } |
| 491 | 508 |
| 492 scoped_refptr<Sequence> | 509 scoped_refptr<Sequence> |
| 493 SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork( | 510 SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork( |
| 494 SchedulerWorker* worker) { | 511 SchedulerWorker* worker) { |
| 495 DCHECK(ContainsWorker(outer_->workers_, worker)); | 512 DCHECK(ContainsWorker(outer_->workers_, worker)); |
| 496 | 513 |
| 514 // Record the TaskScheduler.NumTasksBetweenWaits histogram if the |
| 515 // SchedulerWorker waited on its WaitableEvent since the last GetWork(). |
| 516 // |
| 517 // Note: When GetWork() returns nullptr for the first time after returning a |
| 518 // Sequence, SchedulerWorker waits on its WaitableEvent. When the wait stops |
| 519 // (either because WakeUp() was called or because the sleep timeout expired), |
| 520 // GetWork() is called and the histogram is recorded. If GetWork() returns |
| 521 // nullptr again, the SchedulerWorker may detach. |
| 522 // |did_detach_since_last_get_work_| is set to true from OnMainEntry() if the |
| 523 // SchedulerWorker detaches and wakes up again. The next call to GetWork() |
| 524 // won't record the histogram (which is correct since the SchedulerWorker |
| 525 // didn't wait on its WaitableEvent since the last time the histogram was |
| 526 // recorded). |
| 527 if (last_get_work_returned_nullptr_ && !did_detach_since_last_get_work_) { |
| 528 outer_->num_tasks_between_waits_histogram_->Add(num_tasks_since_last_wait_); |
| 529 num_tasks_since_last_wait_ = 0; |
| 530 } |
| 531 |
| 497 scoped_refptr<Sequence> sequence; | 532 scoped_refptr<Sequence> sequence; |
| 498 { | 533 { |
| 499 std::unique_ptr<PriorityQueue::Transaction> shared_transaction( | 534 std::unique_ptr<PriorityQueue::Transaction> shared_transaction( |
| 500 outer_->shared_priority_queue_.BeginTransaction()); | 535 outer_->shared_priority_queue_.BeginTransaction()); |
| 501 std::unique_ptr<PriorityQueue::Transaction> single_threaded_transaction( | 536 std::unique_ptr<PriorityQueue::Transaction> single_threaded_transaction( |
| 502 single_threaded_priority_queue_.BeginTransaction()); | 537 single_threaded_priority_queue_.BeginTransaction()); |
| 503 | 538 |
| 504 if (shared_transaction->IsEmpty() && | 539 if (shared_transaction->IsEmpty() && |
| 505 single_threaded_transaction->IsEmpty()) { | 540 single_threaded_transaction->IsEmpty()) { |
| 506 single_threaded_transaction.reset(); | 541 single_threaded_transaction.reset(); |
| 507 | 542 |
| 508 // |shared_transaction| is kept alive while |worker| is added to | 543 // |shared_transaction| is kept alive while |worker| is added to |
| 509 // |idle_workers_stack_| to avoid this race: | 544 // |idle_workers_stack_| to avoid this race: |
| 510 // 1. This thread creates a Transaction, finds |shared_priority_queue_| | 545 // 1. This thread creates a Transaction, finds |shared_priority_queue_| |
| 511 // empty and ends the Transaction. | 546 // empty and ends the Transaction. |
| 512 // 2. Other thread creates a Transaction, inserts a Sequence into | 547 // 2. Other thread creates a Transaction, inserts a Sequence into |
| 513 // |shared_priority_queue_| and ends the Transaction. This can't happen | 548 // |shared_priority_queue_| and ends the Transaction. This can't happen |
| 514 // if the Transaction of step 1 is still active because there | 549 // if the Transaction of step 1 is still active because there |
| 515 // can only be one active Transaction per PriorityQueue at a time. | 550 // can only be one active Transaction per PriorityQueue at a time. |
| 516 // 3. Other thread calls WakeUpOneWorker(). No thread is woken up because | 551 // 3. Other thread calls WakeUpOneWorker(). No thread is woken up because |
| 517 // |idle_workers_stack_| is empty. | 552 // |idle_workers_stack_| is empty. |
| 518 // 4. This thread adds itself to |idle_workers_stack_| and goes to sleep. | 553 // 4. This thread adds itself to |idle_workers_stack_| and goes to sleep. |
| 519 // No thread runs the Sequence inserted in step 2. | 554 // No thread runs the Sequence inserted in step 2. |
| 520 outer_->AddToIdleWorkersStack(worker); | 555 outer_->AddToIdleWorkersStack(worker); |
| 521 if (idle_start_time_.is_null()) | 556 if (idle_start_time_.is_null()) |
| 522 idle_start_time_ = TimeTicks::Now(); | 557 idle_start_time_ = TimeTicks::Now(); |
| 558 did_detach_since_last_get_work_ = false; |
| 559 last_get_work_returned_nullptr_ = true; |
| 523 return nullptr; | 560 return nullptr; |
| 524 } | 561 } |
| 525 | 562 |
| 526 // True if both PriorityQueues have Sequences and the Sequence at the top of | 563 // True if both PriorityQueues have Sequences and the Sequence at the top of |
| 527 // the shared PriorityQueue is more important. | 564 // the shared PriorityQueue is more important. |
| 528 const bool shared_sequence_is_more_important = | 565 const bool shared_sequence_is_more_important = |
| 529 !shared_transaction->IsEmpty() && | 566 !shared_transaction->IsEmpty() && |
| 530 !single_threaded_transaction->IsEmpty() && | 567 !single_threaded_transaction->IsEmpty() && |
| 531 shared_transaction->PeekSortKey() > | 568 shared_transaction->PeekSortKey() > |
| 532 single_threaded_transaction->PeekSortKey(); | 569 single_threaded_transaction->PeekSortKey(); |
| 533 | 570 |
| 534 if (single_threaded_transaction->IsEmpty() || | 571 if (single_threaded_transaction->IsEmpty() || |
| 535 shared_sequence_is_more_important) { | 572 shared_sequence_is_more_important) { |
| 536 sequence = shared_transaction->PopSequence(); | 573 sequence = shared_transaction->PopSequence(); |
| 537 last_sequence_is_single_threaded_ = false; | 574 last_sequence_is_single_threaded_ = false; |
| 538 } else { | 575 } else { |
| 539 DCHECK(!single_threaded_transaction->IsEmpty()); | 576 DCHECK(!single_threaded_transaction->IsEmpty()); |
| 540 sequence = single_threaded_transaction->PopSequence(); | 577 sequence = single_threaded_transaction->PopSequence(); |
| 541 last_sequence_is_single_threaded_ = true; | 578 last_sequence_is_single_threaded_ = true; |
| 542 } | 579 } |
| 543 } | 580 } |
| 544 DCHECK(sequence); | 581 DCHECK(sequence); |
| 545 | 582 |
| 583 outer_->RemoveFromIdleWorkersStack(worker); |
| 546 idle_start_time_ = TimeTicks(); | 584 idle_start_time_ = TimeTicks(); |
| 585 did_detach_since_last_get_work_ = false; |
| 586 last_get_work_returned_nullptr_ = false; |
| 547 | 587 |
| 548 outer_->RemoveFromIdleWorkersStack(worker); | |
| 549 return sequence; | 588 return sequence; |
| 550 } | 589 } |
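
The comment at the top of GetWork() above explains when the TaskScheduler.NumTasksBetweenWaits histogram is recorded. Below is a minimal standalone model of that bookkeeping (hypothetical class and method names; the real logic lives in SchedulerWorkerDelegateImpl and records into |num_tasks_between_waits_histogram_| rather than printing): a sample is emitted at the top of GetWork() only when the previous call returned nullptr and the worker did not detach in between, and DidRunTask() increments the counter.

#include <cstddef>
#include <iostream>

class WaitBookkeepingSketch {
 public:
  // Mirrors the flag set in OnMainEntry() when |detach_duration| is not max.
  void OnMainEntryAfterDetach() { did_detach_since_last_get_work_ = true; }

  // Mirrors the top and bottom of GetWork(). |found_work| says whether a
  // Sequence was extracted. Returns true when a sample would be recorded.
  bool OnGetWork(bool found_work) {
    const bool record =
        last_get_work_returned_nullptr_ && !did_detach_since_last_get_work_;
    if (record) {
      std::cout << "NumTasksBetweenWaits sample: "
                << num_tasks_since_last_wait_ << "\n";
      num_tasks_since_last_wait_ = 0;
    }
    did_detach_since_last_get_work_ = false;
    last_get_work_returned_nullptr_ = !found_work;
    return record;
  }

  // Mirrors the increment added to DidRunTask().
  void OnDidRunTask() { ++num_tasks_since_last_wait_; }

 private:
  bool last_get_work_returned_nullptr_ = false;
  bool did_detach_since_last_get_work_ = false;
  size_t num_tasks_since_last_wait_ = 0;
};

int main() {
  WaitBookkeepingSketch worker;
  worker.OnGetWork(/*found_work=*/true);   // finds a Sequence, runs two tasks
  worker.OnDidRunTask();
  worker.OnDidRunTask();
  worker.OnGetWork(/*found_work=*/false);  // no work: worker waits
  worker.OnGetWork(/*found_work=*/true);   // wake-up: records a sample of 2
  return 0;
}

Running the sketch prints a single sample of 2, matching the two tasks run between waits; if OnMainEntryAfterDetach() were called before the last OnGetWork(), no sample would be printed, as described in the comment.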
| 551 | 590 |
| 552 void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::DidRunTask( | 591 void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::DidRunTask( |
| 553 const Task* task, | 592 const Task* task, |
| 554 const TimeDelta& task_latency) { | 593 const TimeDelta& task_latency) { |
| 594 ++num_tasks_since_last_wait_; |
| 595 |
| 555 const int priority_index = static_cast<int>(task->traits.priority()); | 596 const int priority_index = static_cast<int>(task->traits.priority()); |
| 556 | 597 |
| 557 // As explained in the header file, histograms are allocated on demand. It | 598 // As explained in the header file, histograms are allocated on demand. It |
| 558 // doesn't matter if an element of |task_latency_histograms_| is set multiple | 599 // doesn't matter if an element of |task_latency_histograms_| is set multiple |
| 559 // times since GetTaskLatencyHistogram() is idempotent. As explained in the | 600 // times since GetTaskLatencyHistogram() is idempotent. As explained in the |
| 560 // comment at the top of histogram_macros.h, barriers are required. | 601 // comment at the top of histogram_macros.h, barriers are required. |
| 561 HistogramBase* task_latency_histogram = reinterpret_cast<HistogramBase*>( | 602 HistogramBase* task_latency_histogram = reinterpret_cast<HistogramBase*>( |
| 562 subtle::Acquire_Load(&outer_->task_latency_histograms_[priority_index])); | 603 subtle::Acquire_Load(&outer_->task_latency_histograms_[priority_index])); |
| 563 if (!task_latency_histogram) { | 604 if (!task_latency_histogram) { |
| 564 task_latency_histogram = | 605 task_latency_histogram = |
| (...skipping 52 matching lines...) |
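
The comment above DidRunTask()'s histogram lookup describes the on-demand allocation: the worker acquire-loads the histogram pointer and, if it is still null, obtains the histogram from the idempotent factory (the matching release store is presumably in the elided lines). A standalone analogue of that pattern, using std::atomic instead of base::subtle and hypothetical names, looks roughly like this:

#include <atomic>
#include <cassert>
#include <string>

struct Histogram {
  explicit Histogram(std::string n) : name(std::move(n)) {}
  std::string name;
};

// Idempotent factory: always returns the same instance in this sketch.
Histogram* FactoryGet(const std::string& name) {
  static Histogram instance(name);  // C++11 guarantees thread-safe init.
  return &instance;
}

std::atomic<Histogram*> g_task_latency_histogram{nullptr};

Histogram* GetTaskLatencyHistogramSketch() {
  // Acquire load: if another thread already published the pointer, the
  // histogram it points to is fully constructed from this thread's view.
  Histogram* h = g_task_latency_histogram.load(std::memory_order_acquire);
  if (!h) {
    h = FactoryGet("TaskScheduler.TaskLatency.ExamplePool");
    // Release store publishes the pointer. Two racing threads may both call
    // the factory, but it returns the same object, so the second store is
    // harmless.
    g_task_latency_histogram.store(h, std::memory_order_release);
  }
  return h;
}

int main() {
  assert(GetTaskLatencyHistogramSketch() == GetTaskLatencyHistogramSketch());
  return 0;
}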
| 617 suggested_reclaim_time_(suggested_reclaim_time), | 658 suggested_reclaim_time_(suggested_reclaim_time), |
| 618 idle_workers_stack_lock_(shared_priority_queue_.container_lock()), | 659 idle_workers_stack_lock_(shared_priority_queue_.container_lock()), |
| 619 idle_workers_stack_cv_for_testing_( | 660 idle_workers_stack_cv_for_testing_( |
| 620 idle_workers_stack_lock_.CreateConditionVariable()), | 661 idle_workers_stack_lock_.CreateConditionVariable()), |
| 621 join_for_testing_returned_(WaitableEvent::ResetPolicy::MANUAL, | 662 join_for_testing_returned_(WaitableEvent::ResetPolicy::MANUAL, |
| 622 WaitableEvent::InitialState::NOT_SIGNALED), | 663 WaitableEvent::InitialState::NOT_SIGNALED), |
| 623 #if DCHECK_IS_ON() | 664 #if DCHECK_IS_ON() |
| 624 workers_created_(WaitableEvent::ResetPolicy::MANUAL, | 665 workers_created_(WaitableEvent::ResetPolicy::MANUAL, |
| 625 WaitableEvent::InitialState::NOT_SIGNALED), | 666 WaitableEvent::InitialState::NOT_SIGNALED), |
| 626 #endif | 667 #endif |
| 668 // Mimics the UMA_HISTOGRAM_LONG_TIMES macro. |
| 627 detach_duration_histogram_(Histogram::FactoryTimeGet( | 669 detach_duration_histogram_(Histogram::FactoryTimeGet( |
| 628 kDetachDurationHistogramPrefix + name_ + kPoolNameSuffix, | 670 kDetachDurationHistogramPrefix + name_ + kPoolNameSuffix, |
| 629 TimeDelta::FromMilliseconds(1), | 671 TimeDelta::FromMilliseconds(1), |
| 630 TimeDelta::FromHours(1), | 672 TimeDelta::FromHours(1), |
| 631 50, | 673 50, |
| 632 HistogramBase::kUmaTargetedHistogramFlag)), | 674 HistogramBase::kUmaTargetedHistogramFlag)), |
| 675 // Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A SchedulerWorker is |
| 676 // expected to run between zero and a few tens of tasks between waits. |
| 677 // When it runs more than 100 tasks, there is no need to know the exact |
| 678 // number of tasks that ran. |
| 679 num_tasks_between_waits_histogram_(Histogram::FactoryGet( |
| 680 kNumTasksBetweenWaitsHistogramPrefix + name_ + kPoolNameSuffix, |
| 681 1, |
| 682 100, |
| 683 50, |
| 684 HistogramBase::kUmaTargetedHistogramFlag)), |
| 633 task_tracker_(task_tracker), | 685 task_tracker_(task_tracker), |
| 634 delayed_task_manager_(delayed_task_manager) { | 686 delayed_task_manager_(delayed_task_manager) { |
| 635 DCHECK(task_tracker_); | 687 DCHECK(task_tracker_); |
| 636 DCHECK(delayed_task_manager_); | 688 DCHECK(delayed_task_manager_); |
| 637 } | 689 } |
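
The constructor composes histogram names at runtime from the prefixes declared at the top of the file, the pool name and kPoolNameSuffix, which is presumably why it calls Histogram::FactoryTimeGet()/FactoryGet() directly with the bucket layouts of the UMA_HISTOGRAM_LONG_TIMES and UMA_HISTOGRAM_COUNTS_100 macros it mimics rather than the macros themselves (to my understanding, those macros expect a name fixed at the call site). A small standalone check of the resulting names, with a hypothetical pool name "Foreground", is sketched below.

#include <cassert>
#include <string>

int main() {
  // Constants copied from the top of this file.
  const std::string kPoolNameSuffix = "Pool";
  const std::string kDetachDurationHistogramPrefix =
      "TaskScheduler.DetachDuration.";
  const std::string kNumTasksBetweenWaitsHistogramPrefix =
      "TaskScheduler.NumTasksBetweenWaits.";

  const std::string name = "Foreground";  // Hypothetical pool name.

  assert(kDetachDurationHistogramPrefix + name + kPoolNameSuffix ==
         "TaskScheduler.DetachDuration.ForegroundPool");
  assert(kNumTasksBetweenWaitsHistogramPrefix + name + kPoolNameSuffix ==
         "TaskScheduler.NumTasksBetweenWaits.ForegroundPool");
  return 0;
}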
| 638 | 690 |
| 639 bool SchedulerWorkerPoolImpl::Initialize( | 691 bool SchedulerWorkerPoolImpl::Initialize( |
| 640 ThreadPriority priority_hint, | 692 ThreadPriority priority_hint, |
| 641 size_t max_threads, | 693 size_t max_threads, |
| 642 const ReEnqueueSequenceCallback& re_enqueue_sequence_callback) { | 694 const ReEnqueueSequenceCallback& re_enqueue_sequence_callback) { |
| (...skipping 57 matching lines...) |
| 700 AutoSchedulerLock auto_lock(idle_workers_stack_lock_); | 752 AutoSchedulerLock auto_lock(idle_workers_stack_lock_); |
| 701 idle_workers_stack_.Remove(worker); | 753 idle_workers_stack_.Remove(worker); |
| 702 } | 754 } |
| 703 | 755 |
| 704 bool SchedulerWorkerPoolImpl::CanWorkerDetachForTesting() { | 756 bool SchedulerWorkerPoolImpl::CanWorkerDetachForTesting() { |
| 705 return !worker_detachment_disallowed_.IsSet(); | 757 return !worker_detachment_disallowed_.IsSet(); |
| 706 } | 758 } |
| 707 | 759 |
| 708 } // namespace internal | 760 } // namespace internal |
| 709 } // namespace base | 761 } // namespace base |