| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/task_scheduler/scheduler_worker_pool_impl.h" | 5 #include "base/task_scheduler/scheduler_worker_pool_impl.h" |
| 6 | 6 |
| 7 #include <stddef.h> | 7 #include <stddef.h> |
| 8 | 8 |
| 9 #include <algorithm> | 9 #include <algorithm> |
| 10 #include <utility> | 10 #include <utility> |
| (...skipping 220 matching lines...) |
| 231 ~SchedulerWorkerDelegateImpl() override; | 231 ~SchedulerWorkerDelegateImpl() override; |
| 232 | 232 |
| 233 PriorityQueue* single_threaded_priority_queue() { | 233 PriorityQueue* single_threaded_priority_queue() { |
| 234 return &single_threaded_priority_queue_; | 234 return &single_threaded_priority_queue_; |
| 235 } | 235 } |
| 236 | 236 |
| 237 // SchedulerWorker::Delegate: | 237 // SchedulerWorker::Delegate: |
| 238 void OnMainEntry(SchedulerWorker* worker, | 238 void OnMainEntry(SchedulerWorker* worker, |
| 239 const TimeDelta& detach_duration) override; | 239 const TimeDelta& detach_duration) override; |
| 240 scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override; | 240 scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override; |
| 241 void DidRunTask(const Task* task, const TimeDelta& task_latency) override; | 241 void DidRunTaskWithPriority(TaskPriority task_priority, |
| | 242 const TimeDelta& task_latency) override; |
| 242 void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override; | 243 void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override; |
| 243 TimeDelta GetSleepTimeout() override; | 244 TimeDelta GetSleepTimeout() override; |
| 244 bool CanDetach(SchedulerWorker* worker) override; | 245 bool CanDetach(SchedulerWorker* worker) override; |
| 245 | 246 |
| 246 void RegisterSingleThreadTaskRunner() { | 247 void RegisterSingleThreadTaskRunner() { |
| 247 // No barrier as barriers only affect sequential consistency which is | 248 // No barrier as barriers only affect sequential consistency which is |
| 248 // irrelevant in a single variable use case (they don't force an immediate | 249 // irrelevant in a single variable use case (they don't force an immediate |
| 249 // flush any more than atomics do by default). | 250 // flush any more than atomics do by default). |
| 250 subtle::NoBarrier_AtomicIncrement(&num_single_threaded_runners_, 1); | 251 subtle::NoBarrier_AtomicIncrement(&num_single_threaded_runners_, 1); |
| 251 } | 252 } |
| (...skipping 329 matching lines...) |
| 581 DCHECK(sequence); | 582 DCHECK(sequence); |
| 582 | 583 |
| 583 outer_->RemoveFromIdleWorkersStack(worker); | 584 outer_->RemoveFromIdleWorkersStack(worker); |
| 584 idle_start_time_ = TimeTicks(); | 585 idle_start_time_ = TimeTicks(); |
| 585 did_detach_since_last_get_work_ = false; | 586 did_detach_since_last_get_work_ = false; |
| 586 last_get_work_returned_nullptr_ = false; | 587 last_get_work_returned_nullptr_ = false; |
| 587 | 588 |
| 588 return sequence; | 589 return sequence; |
| 589 } | 590 } |
| 590 | 591 |
| 591 void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::DidRunTask( | 592 void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl:: |
| 592 const Task* task, | 593 DidRunTaskWithPriority(TaskPriority task_priority, |
| 593 const TimeDelta& task_latency) { | 594 const TimeDelta& task_latency) { |
| 594 ++num_tasks_since_last_wait_; | 595 ++num_tasks_since_last_wait_; |
| 595 | 596 |
| 596 const int priority_index = static_cast<int>(task->traits.priority()); | 597 const int priority_index = static_cast<int>(task_priority); |
| 597 | 598 |
| 598 // As explained in the header file, histograms are allocated on demand. It | 599 // As explained in the header file, histograms are allocated on demand. It |
| 599 // doesn't matter if an element of |task_latency_histograms_| is set multiple | 600 // doesn't matter if an element of |task_latency_histograms_| is set multiple |
| 600 // times since GetTaskLatencyHistogram() is idempotent. As explained in the | 601 // times since GetTaskLatencyHistogram() is idempotent. As explained in the |
| 601 // comment at the top of histogram_macros.h, barriers are required. | 602 // comment at the top of histogram_macros.h, barriers are required. |
| 602 HistogramBase* task_latency_histogram = reinterpret_cast<HistogramBase*>( | 603 HistogramBase* task_latency_histogram = reinterpret_cast<HistogramBase*>( |
| 603 subtle::Acquire_Load(&outer_->task_latency_histograms_[priority_index])); | 604 subtle::Acquire_Load(&outer_->task_latency_histograms_[priority_index])); |
| 604 if (!task_latency_histogram) { | 605 if (!task_latency_histogram) { |
| 605 task_latency_histogram = | 606 task_latency_histogram = |
| 606 GetTaskLatencyHistogram(outer_->name_, task->traits.priority()); | 607 GetTaskLatencyHistogram(outer_->name_, task_priority); |
| 607 subtle::Release_Store( | 608 subtle::Release_Store( |
| 608 &outer_->task_latency_histograms_[priority_index], | 609 &outer_->task_latency_histograms_[priority_index], |
| 609 reinterpret_cast<subtle::AtomicWord>(task_latency_histogram)); | 610 reinterpret_cast<subtle::AtomicWord>(task_latency_histogram)); |
| 610 } | 611 } |
| 611 | 612 |
| 612 task_latency_histogram->AddTime(task_latency); | 613 task_latency_histogram->AddTime(task_latency); |
| 613 } | 614 } |
| 614 | 615 |
| 615 void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl:: | 616 void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl:: |
| 616 ReEnqueueSequence(scoped_refptr<Sequence> sequence) { | 617 ReEnqueueSequence(scoped_refptr<Sequence> sequence) { |
| (...skipping 135 matching lines...) |
| 752 AutoSchedulerLock auto_lock(idle_workers_stack_lock_); | 753 AutoSchedulerLock auto_lock(idle_workers_stack_lock_); |
| 753 idle_workers_stack_.Remove(worker); | 754 idle_workers_stack_.Remove(worker); |
| 754 } | 755 } |
| 755 | 756 |
| 756 bool SchedulerWorkerPoolImpl::CanWorkerDetachForTesting() { | 757 bool SchedulerWorkerPoolImpl::CanWorkerDetachForTesting() { |
| 757 return !worker_detachment_disallowed_.IsSet(); | 758 return !worker_detachment_disallowed_.IsSet(); |
| 758 } | 759 } |
| 759 | 760 |
| 760 } // namespace internal | 761 } // namespace internal |
| 761 } // namespace base | 762 } // namespace base |