Index: base/task_scheduler/scheduler_worker_pool_impl_unittest.cc |
diff --git a/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc b/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc |
index 0af4eeee466043e02dd4089d2ce862f30be69259..2f5c462ef9bb6769724300c6af6af0cd14f1d6ee 100644 |
--- a/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc |
+++ b/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc |
@@ -17,6 +17,9 @@ |
#include "base/macros.h" |
#include "base/memory/ptr_util.h" |
#include "base/memory/ref_counted.h" |
+#include "base/metrics/histogram.h" |
+#include "base/metrics/histogram_samples.h" |
+#include "base/metrics/statistics_recorder.h" |
#include "base/synchronization/condition_variable.h" |
#include "base/synchronization/lock.h" |
#include "base/synchronization/waitable_event.h" |
@@ -40,11 +43,16 @@ namespace base { |
namespace internal { |
namespace { |
-const size_t kNumWorkersInWorkerPool = 4; |
-const size_t kNumThreadsPostingTasks = 4; |
-const size_t kNumTasksPostedPerThread = 150; |
+constexpr size_t kNumWorkersInWorkerPool = 4; |
+constexpr size_t kNumThreadsPostingTasks = 4; |
+constexpr size_t kNumTasksPostedPerThread = 150; |
+// This can't be lower because Windows' WaitableEvent wakes up too early when a |
+// small timeout is used. This results in many spurious wake ups before a worker |
+// is allowed to detach. |
constexpr TimeDelta kReclaimTimeForDetachTests = |
- TimeDelta::FromMilliseconds(10); |
+ TimeDelta::FromMilliseconds(500); |
+constexpr TimeDelta kExtraTimeToWaitForDetach = |
+ TimeDelta::FromMilliseconds(200); |
using IORestriction = SchedulerWorkerPoolParams::IORestriction; |
@@ -69,7 +77,7 @@ class TaskSchedulerWorkerPoolImplTest |
TaskSchedulerWorkerPoolImplTest() = default; |
void SetUp() override { |
- InitializeWorkerPool(TimeDelta::Max()); |
+ InitializeWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool); |
} |
void TearDown() override { |
@@ -77,12 +85,11 @@ class TaskSchedulerWorkerPoolImplTest |
worker_pool_->JoinForTesting(); |
} |
- void InitializeWorkerPool(const TimeDelta& suggested_reclaim_time) { |
+ void InitializeWorkerPool(const TimeDelta& suggested_reclaim_time, |
+ size_t num_workers) { |
worker_pool_ = SchedulerWorkerPoolImpl::Create( |
- SchedulerWorkerPoolParams("TestWorkerPoolWithFileIO", |
- ThreadPriority::NORMAL, |
- IORestriction::ALLOWED, |
- kNumWorkersInWorkerPool, |
+ SchedulerWorkerPoolParams("TestWorkerPool", ThreadPriority::NORMAL, |
+ IORestriction::ALLOWED, num_workers, |
suggested_reclaim_time), |
Bind(&TaskSchedulerWorkerPoolImplTest::ReEnqueueSequenceCallback, |
Unretained(this)), |
@@ -437,7 +444,7 @@ class TaskSchedulerWorkerPoolSingleThreadedTest |
protected: |
void SetUp() override { |
- InitializeWorkerPool(kReclaimTimeForDetachTests); |
+ InitializeWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool); |
} |
TaskSchedulerWorkerPoolSingleThreadedTest() = default; |
@@ -469,8 +476,7 @@ TEST_F(TaskSchedulerWorkerPoolSingleThreadedTest, SingleThreadTask) { |
worker_pool_->WaitForAllWorkersIdleForTesting(); |
// Give the worker pool a chance to reclaim its threads. |
- PlatformThread::Sleep( |
- kReclaimTimeForDetachTests + TimeDelta::FromMilliseconds(200)); |
+ PlatformThread::Sleep(kReclaimTimeForDetachTests + kExtraTimeToWaitForDetach); |
worker_pool_->DisallowWorkerDetachmentForTesting(); |
@@ -509,7 +515,7 @@ class TaskSchedulerWorkerPoolCheckTlsReuse |
WaitableEvent::InitialState::NOT_SIGNALED) {} |
void SetUp() override { |
- InitializeWorkerPool(kReclaimTimeForDetachTests); |
+ InitializeWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool); |
} |
subtle::Atomic32 zero_tls_values_ = 0; |
@@ -548,8 +554,7 @@ TEST_F(TaskSchedulerWorkerPoolCheckTlsReuse, CheckDetachedThreads) { |
waiter_.Reset(); |
// Give the worker pool a chance to detach its threads. |
- PlatformThread::Sleep( |
- kReclaimTimeForDetachTests + TimeDelta::FromMilliseconds(200)); |
+ PlatformThread::Sleep(kReclaimTimeForDetachTests + kExtraTimeToWaitForDetach); |
worker_pool_->DisallowWorkerDetachmentForTesting(); |
@@ -578,5 +583,145 @@ TEST_F(TaskSchedulerWorkerPoolCheckTlsReuse, CheckDetachedThreads) { |
waiter_.Signal(); |
} |
+namespace { |
+ |
+class TaskSchedulerWorkerPoolHistogramTest |
+ : public TaskSchedulerWorkerPoolImplTest { |
+ public: |
+ TaskSchedulerWorkerPoolHistogramTest() = default; |
+ |
+ protected: |
+ void SetUp() override {} |
+ |
+ void TearDown() override { worker_pool_->JoinForTesting(); } |
+ |
+ private: |
+  // Temporary recorder that isolates histogram samples recorded by these |
+  // tests from the global StatisticsRecorder. |
+ std::unique_ptr<StatisticsRecorder> statistics_recorder_ = |
+ StatisticsRecorder::CreateTemporaryForTesting(); |
+ |
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolHistogramTest); |
+ |
+} // namespace |
+ |
+TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaits) { |
+ WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL, |
+ WaitableEvent::InitialState::NOT_SIGNALED); |
+ InitializeWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool); |
+ auto task_runner = worker_pool_->CreateTaskRunnerWithTraits( |
+ TaskTraits(), ExecutionMode::SEQUENCED); |
+ |
+ // Post a task. |
+ task_runner->PostTask(FROM_HERE, |
+ Bind(&WaitableEvent::Wait, Unretained(&event))); |
+ |
+ // Post 2 more tasks while the first task hasn't completed its execution. It |
+ // is guaranteed that these tasks will run immediately after the first task, |
+ // without allowing the worker to sleep. |
+ task_runner->PostTask(FROM_HERE, Bind(&DoNothing)); |
+ task_runner->PostTask(FROM_HERE, Bind(&DoNothing)); |
+ |
+ // Allow tasks to run and wait until the SchedulerWorker is idle. |
+ event.Signal(); |
+ worker_pool_->WaitForAllWorkersIdleForTesting(); |
+ |
+ // Wake up the SchedulerWorker that just became idle by posting a task and |
+ // wait until it becomes idle again. The SchedulerWorker should record the |
+ // TaskScheduler.NumTasksBetweenWaits.* histogram on wake up. |
+ task_runner->PostTask(FROM_HERE, Bind(&DoNothing)); |
+ worker_pool_->WaitForAllWorkersIdleForTesting(); |
+ |
+ // Verify that counts were recorded to the histogram as expected. |
+ EXPECT_EQ(0, worker_pool_->num_tasks_between_waits_histogram_for_testing() |
+ ->SnapshotSamples() |
+ ->GetCount(0)); |
+ EXPECT_EQ(1, worker_pool_->num_tasks_between_waits_histogram_for_testing() |
+ ->SnapshotSamples() |
+ ->GetCount(3)); |
+ EXPECT_EQ(0, worker_pool_->num_tasks_between_waits_histogram_for_testing() |
+ ->SnapshotSamples() |
+ ->GetCount(10)); |
+} |
+ |
+namespace { |
+ |
+void SignalAndWaitEvent(WaitableEvent* signal_event, |
+ WaitableEvent* wait_event) { |
+ signal_event->Signal(); |
+ wait_event->Wait(); |
+} |
+ |
+} // namespace |
+ |
+TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaitsWithDetach) { |
+ WaitableEvent tasks_can_exit_event(WaitableEvent::ResetPolicy::MANUAL, |
+ WaitableEvent::InitialState::NOT_SIGNALED); |
+ InitializeWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool); |
+ auto task_runner = worker_pool_->CreateTaskRunnerWithTraits( |
+ TaskTraits(), ExecutionMode::PARALLEL); |
+ |
+ // Post tasks to saturate the pool. |
+ std::vector<std::unique_ptr<WaitableEvent>> task_started_events; |
+ for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) { |
+ task_started_events.push_back( |
+ MakeUnique<WaitableEvent>(WaitableEvent::ResetPolicy::MANUAL, |
+ WaitableEvent::InitialState::NOT_SIGNALED)); |
+ task_runner->PostTask( |
+ FROM_HERE, |
+ Bind(&SignalAndWaitEvent, Unretained(task_started_events.back().get()), |
+ Unretained(&tasks_can_exit_event))); |
+ } |
+ for (const auto& task_started_event : task_started_events) |
+ task_started_event->Wait(); |
+ |
+ // Allow tasks to complete their execution and wait to allow workers to |
+ // detach. |
+ tasks_can_exit_event.Signal(); |
+ worker_pool_->WaitForAllWorkersIdleForTesting(); |
+ PlatformThread::Sleep(kReclaimTimeForDetachTests + kExtraTimeToWaitForDetach); |
+ |
+ // Wake up SchedulerWorkers by posting tasks. They should record the |
+ // TaskScheduler.NumTasksBetweenWaits.* histogram on wake up. |
+ tasks_can_exit_event.Reset(); |
+ task_started_events.clear(); |
+ for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) { |
+ task_started_events.push_back( |
+ MakeUnique<WaitableEvent>(WaitableEvent::ResetPolicy::MANUAL, |
+ WaitableEvent::InitialState::NOT_SIGNALED)); |
+ task_runner->PostTask( |
+ FROM_HERE, |
+ Bind(&SignalAndWaitEvent, Unretained(task_started_events.back().get()), |
+ Unretained(&tasks_can_exit_event))); |
+ } |
+ for (const auto& task_started_event : task_started_events) |
+ task_started_event->Wait(); |
+ |
+ // Verify that counts were recorded to the histogram as expected. |
+ // - The "0" bucket has a count of at least 1 because the SchedulerWorker on |
+ // top of the idle stack isn't allowed to detach when its sleep timeout |
+ // expires. Instead, it waits on its WaitableEvent again without running a |
+ // task. The count may be higher than 1 because of spurious wake ups before |
+ // the sleep timeout expires. |
+ EXPECT_GE(worker_pool_->num_tasks_between_waits_histogram_for_testing() |
+ ->SnapshotSamples() |
+ ->GetCount(0), |
+ 1); |
+ // - The "1" bucket has a count of |kNumWorkersInWorkerPool| because each |
+ // SchedulerWorker ran a task before waiting on its WaitableEvent at the |
+ // beginning of the test. |
+ EXPECT_EQ(static_cast<int>(kNumWorkersInWorkerPool), |
+ worker_pool_->num_tasks_between_waits_histogram_for_testing() |
+ ->SnapshotSamples() |
+ ->GetCount(1)); |
+ EXPECT_EQ(0, worker_pool_->num_tasks_between_waits_histogram_for_testing() |
+ ->SnapshotSamples() |
+ ->GetCount(10)); |
+ |
+ tasks_can_exit_event.Signal(); |
+ worker_pool_->WaitForAllWorkersIdleForTesting(); |
+ worker_pool_->DisallowWorkerDetachmentForTesting(); |
+} |
+ |
} // namespace internal |
} // namespace base |