Chromium Code Reviews

Unified Diff: base/task_scheduler/scheduler_worker_pool_impl.cc

Issue 2362743002: Add TaskScheduler.NumTasksBetweenWaits.[pool] histogram. (Closed)
Patch Set: self-review Created 4 years, 3 months ago
Index: base/task_scheduler/scheduler_worker_pool_impl.cc
diff --git a/base/task_scheduler/scheduler_worker_pool_impl.cc b/base/task_scheduler/scheduler_worker_pool_impl.cc
index 6b938ecae750e143c9d6aeac9b22706ddc20dcd7..8baf4401996707098472f01b9c601b237a98b6f7 100644
--- a/base/task_scheduler/scheduler_worker_pool_impl.cc
+++ b/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -34,6 +34,8 @@ namespace {
constexpr char kPoolNameSuffix[] = "Pool";
constexpr char kDetachDurationHistogramPrefix[] =
"TaskScheduler.DetachDuration.";
+constexpr char kNumTasksBetweenWaitsHistogramPrefix[] =
+ "TaskScheduler.NumTasksBetweenWaits.";
constexpr char kTaskLatencyHistogramPrefix[] = "TaskScheduler.TaskLatency.";
// SchedulerWorker that owns the current thread, if any.
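For reference, the recorded histogram name is the new prefix, then the pool name, then kPoolNameSuffix; this is the "[pool]" placeholder in the issue title. A minimal standalone sketch of the concatenation, with "Foreground" as a purely illustrative pool name:

#include <iostream>
#include <string>

int main() {
  const std::string kNumTasksBetweenWaitsHistogramPrefix =
      "TaskScheduler.NumTasksBetweenWaits.";
  const std::string kPoolNameSuffix = "Pool";
  const std::string pool_name = "Foreground";  // Illustrative only.
  // Same name the constructor below passes to Histogram::FactoryGet().
  std::cout << kNumTasksBetweenWaitsHistogramPrefix + pool_name +
                   kPoolNameSuffix
            << "\n";  // Prints: TaskScheduler.NumTasksBetweenWaits.ForegroundPool
}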
@@ -266,6 +268,17 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
// Time when GetWork() first returned nullptr.
TimeTicks idle_start_time_;
+ // Indicates whether the last call to GetWork() returned nullptr.
+ bool last_get_work_returned_nullptr_ = true;
+
+ // Indicates whether the SchedulerWorker was detached since the last call to
+ // GetWork().
+ bool did_detach_since_last_get_work_ = false;
+
+ // Number of tasks executed since the last time the
+ // TaskScheduler.NumTasksBetweenWaits histogram was recorded.
+ size_t num_tasks_since_last_wait_ = 0;
+
subtle::Atomic32 num_single_threaded_runners_ = 0;
const int index_;
@@ -470,8 +483,12 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry(
DCHECK(ContainsWorker(outer_->workers_, worker));
#endif
- if (!detach_duration.is_max())
+ DCHECK_EQ(num_tasks_since_last_wait_, 0U);
+
+ if (!detach_duration.is_max()) {
outer_->detach_duration_histogram_->AddTime(detach_duration);
+ did_detach_since_last_get_work_ = true;
+ }
PlatformThread::SetName(
StringPrintf("TaskScheduler%sWorker%d", outer_->name_.c_str(), index_));
@@ -481,7 +498,7 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry(
tls_current_worker.Get().Set(worker);
tls_current_worker_pool.Get().Set(outer_);
- // New threads haven't run GetWork() yet, so reset the idle_start_time_.
+ // New threads haven't run GetWork() yet, so reset the |idle_start_time_|.
idle_start_time_ = TimeTicks();
ThreadRestrictions::SetIOAllowed(
@@ -494,6 +511,24 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
SchedulerWorker* worker) {
DCHECK(ContainsWorker(outer_->workers_, worker));
+ // Record the TaskScheduler.NumTasksBetweenWaits histogram if the
+ // SchedulerWorker waited on its WaitableEvent since the last GetWork().
+ //
+ // Note: When GetWork() returns nullptr for the first time after returning a
+ // Sequence, SchedulerWorker waits on its WaitableEvent. When the wait stops
+ // (either because WakeUp() was called or because the sleep timeout expired),
+ // GetWork() is called and the histogram is recorded. If GetWork() returns
+ // nullptr again, the SchedulerWorker may detach. If a detach happens,
+ // |did_detach_since_last_get_work_| is set to true and the next call to
robliao 2016/09/23 00:55:30 I would say here that did_detach_since_last_get_work_
fdoray 2016/09/23 19:30:02 Done.
+ // GetWork() won't record the histogram (which is correct since the
+ // SchedulerWorker didn't wait on its WaitableEvent since the last time the
+ // histogram was recorded).
+ if (last_get_work_returned_nullptr_ && !did_detach_since_last_get_work_) {
+ outer_->num_tasks_between_waits_histogram_->Add(num_tasks_since_last_wait_);
+ num_tasks_since_last_wait_ = 0;
+ }
+ did_detach_since_last_get_work_ = false;
+
scoped_refptr<Sequence> sequence;
{
std::unique_ptr<PriorityQueue::Transaction> shared_transaction(
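To make the recording condition above concrete, here is a minimal standalone sketch of the three new fields. WorkerBookkeeping, OnGetWork(), OnDidRunTask(), and OnMainEntryAfterDetach() are hypothetical stand-ins for SchedulerWorkerDelegateImpl, GetWork(), DidRunTask(), and the detach branch in OnMainEntry(); the printf stands in for num_tasks_between_waits_histogram_->Add().

#include <cstddef>
#include <cstdio>

class WorkerBookkeeping {
 public:
  // Models the top of GetWork(). |has_work| is whether a Sequence is
  // available. The histogram is recorded only if the previous call returned
  // nullptr (i.e. the worker waited) and no detach happened in between.
  void OnGetWork(bool has_work) {
    if (last_get_work_returned_nullptr_ && !did_detach_since_last_get_work_) {
      std::printf("Record NumTasksBetweenWaits: %zu\n",
                  num_tasks_since_last_wait_);
      num_tasks_since_last_wait_ = 0;
    }
    did_detach_since_last_get_work_ = false;
    last_get_work_returned_nullptr_ = !has_work;
  }

  // Models the increment added to DidRunTask().
  void OnDidRunTask() { ++num_tasks_since_last_wait_; }

  // Models OnMainEntry() running after a detach (detach_duration not max).
  void OnMainEntryAfterDetach() { did_detach_since_last_get_work_ = true; }

 private:
  bool last_get_work_returned_nullptr_ = true;
  bool did_detach_since_last_get_work_ = false;
  std::size_t num_tasks_since_last_wait_ = 0;
};

int main() {
  WorkerBookkeeping worker;
  worker.OnGetWork(true);   // Records 0: the initial state counts as idle.
  worker.OnDidRunTask();
  worker.OnGetWork(true);   // No record: previous call returned a Sequence.
  worker.OnDidRunTask();
  worker.OnGetWork(false);  // No record: returns nullptr, the worker waits.
  worker.OnGetWork(true);   // Records 2: the worker woke from its wait.

  worker.OnGetWork(false);          // The worker idles again...
  worker.OnMainEntryAfterDetach();  // ...but detaches and re-attaches.
  worker.OnGetWork(true);  // No record: the idle period ended in a detach.
}

The first call records 0 because the fields are initialized as if the worker had just waited, mirroring the initial values in the patch (last_get_work_returned_nullptr_ = true); the final call records nothing, matching the comment above that a detach suppresses the next recording.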
@@ -520,6 +555,7 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
outer_->AddToIdleWorkersStack(worker);
if (idle_start_time_.is_null())
idle_start_time_ = TimeTicks::Now();
+ last_get_work_returned_nullptr_ = true;
return nullptr;
}
@@ -544,14 +580,17 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
DCHECK(sequence);
idle_start_time_ = TimeTicks();
-
outer_->RemoveFromIdleWorkersStack(worker);
robliao 2016/09/23 00:55:30 Nit: The ordering here isn't sensitive, right? Let
fdoray 2016/09/23 19:30:02 Local state update is now grouped together, and th
+ last_get_work_returned_nullptr_ = false;
+
return sequence;
}
void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::DidRunTask(
const Task* task,
const TimeDelta& task_latency) {
+ ++num_tasks_since_last_wait_;
+
const int priority_index = static_cast<int>(task->traits.priority());
// As explained in the header file, histograms are allocated on demand. It
@@ -624,12 +663,20 @@ SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
workers_created_(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED),
#endif
+ // Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
detach_duration_histogram_(Histogram::FactoryTimeGet(
kDetachDurationHistogramPrefix + name_ + kPoolNameSuffix,
TimeDelta::FromMilliseconds(1),
TimeDelta::FromHours(1),
50,
HistogramBase::kUmaTargetedHistogramFlag)),
+ // Mimics the UMA_HISTOGRAM_COUNTS_10000 macro.
robliao 2016/09/23 00:55:30 Worth mentioning why we chose the 10000 macro to c
fdoray 2016/09/23 19:30:02 Done.
+ num_tasks_between_waits_histogram_(Histogram::FactoryGet(
+ kNumTasksBetweenWaitsHistogramPrefix + name_ + kPoolNameSuffix,
+ 1,
+ 10000,
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
task_tracker_(task_tracker),
delayed_task_manager_(delayed_task_manager) {
DCHECK(task_tracker_);
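On the two "mimics the ... macro" comments: UMA_HISTOGRAM_LONG_TIMES expands to a custom-times histogram with a 1 ms minimum, a 1 hour maximum, and 50 buckets, and UMA_HISTOGRAM_COUNTS_10000 to a custom-counts histogram with min 1, max 10000, and 50 buckets, both with kUmaTargetedHistogramFlag. The macros cache the histogram pointer in a function-local static, so they only work with a name that is identical on every call; because the names here vary per pool, the constructor calls the factory functions directly with the same parameters. As a rough standalone illustration of what (min = 1, max = 10000, bucket_count = 50) means (this approximates, but does not exactly reproduce, the rounding in Chromium's Histogram::InitializeBucketRanges()):

#include <cmath>
#include <cstdio>

int main() {
  const double kMin = 1.0;
  const double kMax = 10000.0;
  const int kBucketCount = 50;
  // Bucket 0 is the underflow bucket [0, kMin); the last bucket is the
  // overflow bucket [kMax, +inf). The 48 interior boundaries grow by a
  // constant ratio, giving fine resolution for small task counts and
  // coarse resolution near 10000.
  const double ratio = std::pow(kMax / kMin, 1.0 / (kBucketCount - 2));
  double boundary = kMin;
  for (int i = 1; i < kBucketCount; ++i) {
    std::printf("bucket %2d starts at ~%.0f\n", i, boundary);
    boundary *= ratio;
  }
}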