Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(23)

Unified Diff: content/browser/download/parallel_download_job_unittest.cc

Issue 2767593003: Handle early pause, cancel for parallel requests. (Closed)
Patch Set: Move is_canceled_ to subclass. Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « content/browser/download/parallel_download_job.cc ('k') | no next file » | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: content/browser/download/parallel_download_job_unittest.cc
diff --git a/content/browser/download/parallel_download_job_unittest.cc b/content/browser/download/parallel_download_job_unittest.cc
index 394da4163942f0276df755a842ca346cacf66c4c..60868ffd74cf2accf4322ace2e0d755fd4aae8af 100644
--- a/content/browser/download/parallel_download_job_unittest.cc
+++ b/content/browser/download/parallel_download_job_unittest.cc
@@ -46,12 +46,24 @@ class ParallelDownloadJobForTest : public ParallelDownloadJob {
request_count_(request_count) {}
void CreateRequest(int64_t offset, int64_t length) override {
- fake_tasks_.push_back(std::pair<int64_t, int64_t>(offset, length));
+ std::unique_ptr<DownloadWorker> worker =
+ base::MakeUnique<DownloadWorker>(this, offset, length);
+
+ DCHECK(workers_.find(offset) == workers_.end());
+ workers_[offset] = std::move(worker);
}
+ ParallelDownloadJob::WorkerMap& workers() { return workers_; }
+
int GetParallelRequestCount() const override { return request_count_; }
- std::vector<std::pair<int64_t, int64_t>> fake_tasks_;
+ void OnByteStreamReady(
+ DownloadWorker* worker,
+ std::unique_ptr<ByteStreamReader> stream_reader) override {
+ CountOnByteStreamReady();
+ }
+
+ MOCK_METHOD0(CountOnByteStreamReady, void());
private:
int request_count_;
@@ -70,23 +82,49 @@ class ParallelDownloadJobTest : public testing::Test {
DownloadCreateInfo info;
info.offset = offset;
info.total_bytes = content_length;
+ std::unique_ptr<MockDownloadRequestHandle> request_handle =
+ base::MakeUnique<MockDownloadRequestHandle>();
+ mock_request_handle_ = request_handle.get();
job_ = base::MakeUnique<ParallelDownloadJobForTest>(
- download_item_.get(), base::MakeUnique<MockDownloadRequestHandle>(),
- info, request_count);
+ download_item_.get(), std::move(request_handle), info, request_count);
}
void DestroyParallelJob() {
job_.reset();
download_item_.reset();
item_delegate_.reset();
+ mock_request_handle_ = nullptr;
}
void BuildParallelRequests() { job_->BuildParallelRequests(); }
+ bool IsJobCanceled() const { return job_->is_canceled_; };
+
+ void MakeWorkerReady(
+ DownloadWorker* worker,
+ std::unique_ptr<MockDownloadRequestHandle> request_handle) {
+ UrlDownloader::Delegate* delegate =
+ static_cast<UrlDownloader::Delegate*>(worker);
+ std::unique_ptr<DownloadCreateInfo> create_info =
+ base::MakeUnique<DownloadCreateInfo>();
+ create_info->request_handle = std::move(request_handle);
+ delegate->OnUrlDownloaderStarted(
+ std::move(create_info), std::unique_ptr<ByteStreamReader>(),
+ DownloadUrlParameters::OnStartedCallback());
+ }
+
+ void VerifyWorker(int64_t offset, int64_t length) const {
+ EXPECT_TRUE(job_->workers_.find(offset) != job_->workers_.end());
+ EXPECT_EQ(offset, job_->workers_[offset]->offset());
+ EXPECT_EQ(length, job_->workers_[offset]->length());
+ }
+
content::TestBrowserThreadBundle browser_threads_;
std::unique_ptr<DownloadItemImplDelegate> item_delegate_;
std::unique_ptr<MockDownloadItemImpl> download_item_;
std::unique_ptr<ParallelDownloadJobForTest> job_;
+ // Request handle for the original request.
+ MockDownloadRequestHandle* mock_request_handle_;
};
// Test if parallel requests can be built correctly for a new download.
@@ -96,10 +134,8 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequests) {
// Task 1: Range:50-, for 50 bytes.
CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2);
BuildParallelRequests();
- EXPECT_EQ(1, static_cast<int>(job_->fake_tasks_.size()));
- EXPECT_EQ(50, job_->fake_tasks_[0].first);
- EXPECT_EQ(0, job_->fake_tasks_[0].second);
- job_->fake_tasks_.clear();
+ EXPECT_EQ(1, static_cast<int>(job_->workers().size()));
+ VerifyWorker(50, 0);
DestroyParallelJob();
// Totally 3 requests for 100 bytes.
@@ -108,12 +144,9 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequests) {
// Task 2: Range:66-, for 34 bytes.
CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 3);
BuildParallelRequests();
- EXPECT_EQ(2, static_cast<int>(job_->fake_tasks_.size()));
- EXPECT_EQ(33, job_->fake_tasks_[0].first);
- EXPECT_EQ(33, job_->fake_tasks_[0].second);
- EXPECT_EQ(66, job_->fake_tasks_[1].first);
- EXPECT_EQ(0, job_->fake_tasks_[1].second);
- job_->fake_tasks_.clear();
+ EXPECT_EQ(2, static_cast<int>(job_->workers().size()));
+ VerifyWorker(33, 33);
+ VerifyWorker(66, 0);
DestroyParallelJob();
// Totally 3 requests for 100 bytes. Start from the 17th byte.
@@ -122,34 +155,31 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequests) {
// Task 2: Range:71-99, for 29 bytes.
CreateParallelJob(17, 83, DownloadItem::ReceivedSlices(), 3);
BuildParallelRequests();
- EXPECT_EQ(2, static_cast<int>(job_->fake_tasks_.size()));
- EXPECT_EQ(44, job_->fake_tasks_[0].first);
- EXPECT_EQ(27, job_->fake_tasks_[0].second);
- EXPECT_EQ(71, job_->fake_tasks_[1].first);
- EXPECT_EQ(0, job_->fake_tasks_[1].second);
- job_->fake_tasks_.clear();
+ EXPECT_EQ(2, static_cast<int>(job_->workers().size()));
+ VerifyWorker(44, 27);
+ VerifyWorker(71, 0);
DestroyParallelJob();
// Less than 2 requests, do nothing.
CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 1);
BuildParallelRequests();
- EXPECT_TRUE(job_->fake_tasks_.empty());
+ EXPECT_TRUE(job_->workers().empty());
DestroyParallelJob();
CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 0);
BuildParallelRequests();
- EXPECT_TRUE(job_->fake_tasks_.empty());
+ EXPECT_TRUE(job_->workers().empty());
DestroyParallelJob();
// Content-length is 0, do nothing.
CreateParallelJob(100, 0, DownloadItem::ReceivedSlices(), 3);
BuildParallelRequests();
- EXPECT_TRUE(job_->fake_tasks_.empty());
+ EXPECT_TRUE(job_->workers().empty());
DestroyParallelJob();
CreateParallelJob(0, 0, DownloadItem::ReceivedSlices(), 3);
BuildParallelRequests();
- EXPECT_TRUE(job_->fake_tasks_.empty());
+ EXPECT_TRUE(job_->workers().empty());
DestroyParallelJob();
  // 2 bytes left for 3 additional requests. Only 1 is built.
@@ -157,10 +187,77 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequests) {
// Task 1: Range:99-, for 1 byte.
CreateParallelJob(98, 2, DownloadItem::ReceivedSlices(), 4);
BuildParallelRequests();
- EXPECT_EQ(1, static_cast<int>(job_->fake_tasks_.size()));
- EXPECT_EQ(99, job_->fake_tasks_[0].first);
- EXPECT_EQ(0, job_->fake_tasks_[0].second);
- job_->fake_tasks_.clear();
+ EXPECT_EQ(1, static_cast<int>(job_->workers().size()));
+ VerifyWorker(99, 0);
+ DestroyParallelJob();
+}
+
+// Pause, cancel, resume can be called before or after the worker establishes
+// the byte stream.
+// These tests ensure state consistency between the job and its workers.
+
+// Ensure that canceling before building the requests results in no requests
+// being built.
+TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeBuildRequests) {
+ CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2);
+ EXPECT_CALL(*mock_request_handle_, CancelRequest());
+
+ // Job is canceled before building parallel requests.
+ job_->Cancel(true);
+ EXPECT_TRUE(IsJobCanceled());
+
+ BuildParallelRequests();
+ EXPECT_TRUE(job_->workers().empty());
+
+ DestroyParallelJob();
+}
+
+// Ensure cancel before adding the byte stream will result in workers being
+// canceled.
+TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeByteStreamReady) {
+ CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2);
+ EXPECT_CALL(*mock_request_handle_, CancelRequest());
+
+ BuildParallelRequests();
+ VerifyWorker(50, 0);
+
+ // Job is canceled after building parallel requests and before byte streams
+ // are added to the file sink.
+ job_->Cancel(true);
+ EXPECT_TRUE(IsJobCanceled());
+
+ for (auto& worker : job_->workers()) {
+ std::unique_ptr<MockDownloadRequestHandle> mock_handle =
+ base::MakeUnique<MockDownloadRequestHandle>();
+ EXPECT_CALL(*mock_handle.get(), CancelRequest());
+ MakeWorkerReady(worker.second.get(), std::move(mock_handle));
+ }
+
+ DestroyParallelJob();
+}
+
+// Ensure pause before adding the byte stream will result in workers being
+// paused.
+TEST_F(ParallelDownloadJobTest, EarlyPauseBeforeByteStreamReady) {
+ CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2);
+ EXPECT_CALL(*mock_request_handle_, PauseRequest());
+
+ BuildParallelRequests();
+ VerifyWorker(50, 0);
+
+ // Job is paused after building parallel requests and before adding the byte
+ // stream to the file sink.
+ job_->Pause();
+ EXPECT_TRUE(job_->is_paused());
+
+ for (auto& worker : job_->workers()) {
+ EXPECT_CALL(*job_.get(), CountOnByteStreamReady());
+ std::unique_ptr<MockDownloadRequestHandle> mock_handle =
+ base::MakeUnique<MockDownloadRequestHandle>();
+ EXPECT_CALL(*mock_handle.get(), PauseRequest());
+ MakeWorkerReady(worker.second.get(), std::move(mock_handle));
+ }
+
DestroyParallelJob();
}
« no previous file with comments | « content/browser/download/parallel_download_job.cc ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698