Chromium Code Reviews| Index: content/common/sequenced_worker_pool_unittest.cc |
| =================================================================== |
| --- content/common/sequenced_worker_pool_unittest.cc (revision 0) |
| +++ content/common/sequenced_worker_pool_unittest.cc (revision 0) |
| @@ -0,0 +1,256 @@ |
| +// Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "base/bind.h" |
| +#include "base/memory/ref_counted.h" |
| +#include "base/synchronization/lock.h" |
| +#include "base/synchronization/waitable_event.h" |
| +#include "base/threading/platform_thread.h" |
| +#include "content/common/sequenced_worker_pool.h" |
| +#include "testing/gtest/include/gtest/gtest.h" |
| + |
| +// IMPORTANT NOTE: |
| +// |
| +// Many of these tests have failure modes where they'll hang forever. These |
| +// tests should not be flaky, and hanging indicates a type of failure. Do not |
|
jar (doing other things)
2011/11/23 20:08:16
nit: typo hangling->hanging
|
| +// mark as flaky if they're hanging, it's likely an actual bug. |
| + |
| +namespace { |
| + |
| +const size_t kNumWorkerThreads = 3; |
| + |
| +class TestTracker : public base::RefCountedThreadSafe<TestTracker> { |
| + public: |
| + TestTracker() : completed_event_(false, false) { |
| + } |
| + |
| + // Each of these tasks appends the argument to the complete sequence vector |
| + // so calling code can see what order they finished in. |
| + void FastTask(int id) { |
| + SignalWorkerDone(id); |
| + } |
| + void SlowTask(int id) { |
| + base::PlatformThread::Sleep(1000); |
| + SignalWorkerDone(id); |
| + } |
| + void BlockOnEventTask(int id, base::WaitableEvent* event) { |
| + event->Wait(); |
| + SignalWorkerDone(id); |
| + } |
| + |
| + // Blocks the current thread until at least the given number of tasks are in |
| + // the completed vector, and then returns a copy. |
| + std::vector<int> WaitUntilTasksComplete(size_t num_tasks) { |
| + for (;;) { |
| + { |
| + base::AutoLock lock(lock_); |
| + if (complete_sequence_.size() >= num_tasks) |
| + return complete_sequence_; |
| + } |
| + completed_event_.Wait(); |
| + } |
| + } |
| + |
| + private: |
| + void SignalWorkerDone(int id) { |
| + base::AutoLock lock(lock_); |
| + complete_sequence_.push_back(id); |
| + completed_event_.Signal(); |
| + } |
| + |
| + // Protects the complete_sequence. |
| + base::Lock lock_; |
| + |
| + // Signaled every time something is posted to the complete_sequence_. |
| + base::WaitableEvent completed_event_; |
| + |
| + // Protected by lock_. |
| + std::vector<int> complete_sequence_; |
| +}; |
| + |
| +class SequencedWorkerPoolTest : public testing::Test, |
| + public SequencedWorkerPool::TestingObserver { |
| + public: |
| + SequencedWorkerPoolTest() |
| + : pool_(kNumWorkerThreads), |
| + tracker_(new TestTracker) { |
| + pool_.SetTestingObserver(this); |
| + } |
| + ~SequencedWorkerPoolTest() { |
| + } |
| + |
| + virtual void SetUp() { |
| + } |
| + virtual void TearDown() { |
| + } |
| + |
| + SequencedWorkerPool& pool() { return pool_; } |
| + TestTracker* tracker() { return tracker_.get(); } |
| + |
| + protected: |
| + // This closure will be executed right before the pool blocks on shutdown. |
| + base::Closure before_wait_for_shutdown_; |
| + |
| + private: |
| + // SequencedWorkerPool::TestingObserver implementation. |
| + virtual void WillWaitForShutdown() { |
| + if (!before_wait_for_shutdown_.is_null()) |
| + before_wait_for_shutdown_.Run(); |
| + } |
| + |
| + SequencedWorkerPool pool_; |
| + scoped_refptr<TestTracker> tracker_; |
| +}; |
| + |
| +// Checks that the given number of entries are in the tasks to complete of |
| +// the given tracker, and then signals the given event the given number of |
| +// times. This is used to wakt up blocked background threads before blocking |
| +// on shutdown. |
| +void EnsureTasksToCompleteCountAndSignal(TestTracker* tracker, |
| + size_t expected_tasks_to_complete, |
| + base::WaitableEvent* event, |
| + size_t times_to_signal_event) { |
| + EXPECT_EQ( |
| + expected_tasks_to_complete, |
| + tracker->WaitUntilTasksComplete(expected_tasks_to_complete).size()); |
| + |
| + for (size_t i = 0; i < times_to_signal_event; i++) |
| + event->Signal(); |
| +} |
| + |
| +} // namespace |
| + |
| +// Tests that posting a bunch of tasks (many more than the number of worker |
| +// threads) runs them all. |
| +TEST_F(SequencedWorkerPoolTest, LotsOfTasks) { |
| + pool().PostWorkerTask(SequencedWorkerPool::BLOCK_SHUTDOWN, FROM_HERE, |
| + base::Bind(&TestTracker::SlowTask, tracker(), 0)); |
| + |
| + const size_t kNumTasks = 17; |
| + for (size_t i = 1; i < kNumTasks; i++) { |
| + pool().PostWorkerTask(SequencedWorkerPool::BLOCK_SHUTDOWN, FROM_HERE, |
| + base::Bind(&TestTracker::FastTask, tracker(), i)); |
| + } |
| + |
| + std::vector<int> result = tracker()->WaitUntilTasksComplete(kNumTasks); |
| + EXPECT_EQ(kNumTasks, result.size()); |
| +} |
| + |
// Test that tasks with the same sequence token are executed in order but don't
// affect other tasks.
TEST_F(SequencedWorkerPoolTest, Sequence) {
  // Fill all the worker threads except one.
  base::WaitableEvent background_event(false, false);
  const size_t kNumBackgroundTasks = kNumWorkerThreads - 1;
  for (size_t i = 0; i < kNumBackgroundTasks; i++) {
    pool().PostWorkerTask(SequencedWorkerPool::BLOCK_SHUTDOWN, FROM_HERE,
                          base::Bind(&TestTracker::BlockOnEventTask,
                                     tracker(), i, &background_event));
  }

  // Create two tasks with the same sequence token, one that will block on the
  // event, and one which will just complete quickly when it's run. Since there
  // is one worker thread free, the first task will start and then block, and
  // the second task should be waiting.
  base::WaitableEvent event1(false, false);
  SequencedWorkerPool::SequenceToken token1 = pool().GetSequenceToken();
  pool().PostSequencedWorkerTask(
      token1, SequencedWorkerPool::BLOCK_SHUTDOWN, FROM_HERE,
      base::Bind(&TestTracker::BlockOnEventTask, tracker(), 100,
                 &event1));
  pool().PostSequencedWorkerTask(
      token1, SequencedWorkerPool::BLOCK_SHUTDOWN, FROM_HERE,
      base::Bind(&TestTracker::FastTask, tracker(), 101));
  // Waiting for 0 tasks just snapshots the current completion list without
  // blocking; nothing should have completed yet.
  EXPECT_EQ(0u, tracker()->WaitUntilTasksComplete(0).size());

  // Create another two tasks as above with a different token. These will be
  // blocked since there are no slots to run.
  SequencedWorkerPool::SequenceToken token2 = pool().GetSequenceToken();
  pool().PostSequencedWorkerTask(
      token2, SequencedWorkerPool::BLOCK_SHUTDOWN, FROM_HERE,
      base::Bind(&TestTracker::FastTask, tracker(), 200));
  pool().PostSequencedWorkerTask(
      token2, SequencedWorkerPool::BLOCK_SHUTDOWN, FROM_HERE,
      base::Bind(&TestTracker::FastTask, tracker(), 201));
  EXPECT_EQ(0u, tracker()->WaitUntilTasksComplete(0).size());

  // Let one background task complete. This should then let both tasks of
  // token2 run to completion in order. The second task of token1 should still
  // be blocked.
  background_event.Signal();
  std::vector<int> result = tracker()->WaitUntilTasksComplete(3);
  ASSERT_EQ(3u, result.size());
  // result[0] is the unblocked background task; token2's tasks follow in
  // posting order.
  EXPECT_EQ(200, result[1]);
  EXPECT_EQ(201, result[2]);

  // Finish the rest of the background tasks. This should leave some workers
  // free with the second token1 task still blocked on the first.
  for (size_t i = 0; i < kNumBackgroundTasks - 1; i++)
    background_event.Signal();
  EXPECT_EQ(kNumBackgroundTasks + 2,
            tracker()->WaitUntilTasksComplete(kNumBackgroundTasks + 2).size());

  // Allow the first task of token1 to complete. This should run the second.
  event1.Signal();
  result = tracker()->WaitUntilTasksComplete(kNumBackgroundTasks + 4);
  ASSERT_EQ(kNumBackgroundTasks + 4, result.size());
  // token1's tasks must be the last two to finish, in posting order.
  EXPECT_EQ(100, result[result.size() - 2]);
  EXPECT_EQ(101, result[result.size() - 1]);
}
| + |
| +// Tests that unrun tasks are discarded properly according to their shutdown |
| +// mode. |
| +TEST_F(SequencedWorkerPoolTest, DiscardOnShutdown) { |
| + // Start tasks to take all the threads and block them. |
| + base::WaitableEvent background_event(false, false); |
| + for (size_t i = 0; i < kNumWorkerThreads; i++) { |
| + pool().PostWorkerTask(SequencedWorkerPool::BLOCK_SHUTDOWN, FROM_HERE, |
| + base::Bind(&TestTracker::BlockOnEventTask, |
| + tracker(), i, &background_event)); |
| + } |
| + |
| + // Create some tasks with different shutdown modes. |
| + pool().PostWorkerTask(SequencedWorkerPool::CONTINUE_ON_SHUTDOWN, FROM_HERE, |
| + base::Bind(&TestTracker::FastTask, tracker(), 100)); |
| + pool().PostWorkerTask(SequencedWorkerPool::SKIP_ON_SHUTDOWN, FROM_HERE, |
| + base::Bind(&TestTracker::FastTask, tracker(), 101)); |
| + pool().PostWorkerTask(SequencedWorkerPool::BLOCK_SHUTDOWN, FROM_HERE, |
| + base::Bind(&TestTracker::FastTask, tracker(), 102)); |
| + |
| + // Shutdown the woeker pool. This should discard all non-blocking tasks. |
| + before_wait_for_shutdown_ = |
| + base::Bind(&EnsureTasksToCompleteCountAndSignal, tracker(), 0, |
| + &background_event, kNumWorkerThreads); |
| + pool().Shutdown(); |
| + |
| + std::vector<int> result = tracker()->WaitUntilTasksComplete(4); |
| + ASSERT_EQ(4, result.size()); |
| + |
| + // The last item should be the BLOCK_SHUTDOWN task, the other two should have |
| + // been dropped. |
| + EXPECT_EQ(102, result[3]); |
| +} |
| + |
| +// Tests that CONTINUE_ON_SHUTDOWN tasks don't block shutdown. |
| +TEST_F(SequencedWorkerPoolTest, ContinueOnShutdown) { |
| + base::WaitableEvent background_event(false, false); |
| + pool().PostWorkerTask(SequencedWorkerPool::CONTINUE_ON_SHUTDOWN, FROM_HERE, |
| + base::Bind(&TestTracker::BlockOnEventTask, |
| + tracker(), 0, &background_event)); |
| + |
| + // This should not block. If this test hangs, it means it failed. |
| + pool().Shutdown(); |
| + |
| + // The task should not have completed yet. |
| + EXPECT_EQ(0u, tracker()->WaitUntilTasksComplete(0).size()); |
| + |
| + // Posting more tasks should fail. |
| + EXPECT_FALSE(pool().PostWorkerTask(SequencedWorkerPool::CONTINUE_ON_SHUTDOWN, |
| + FROM_HERE, base::Bind(&TestTracker::FastTask, tracker(), 0))); |
| + |
| + // Continue the background thread and make sure the task can complete. |
| + background_event.Signal(); |
| + std::vector<int> result = tracker()->WaitUntilTasksComplete(1); |
| + EXPECT_EQ(1, result.size()); |
| +} |