Index: content/browser/download/parallel_download_job_unittest.cc
diff --git a/content/browser/download/parallel_download_job_unittest.cc b/content/browser/download/parallel_download_job_unittest.cc
index e31fb45a2a36d3844dc943fe103fcadb8fe3ecc4..de6da07deda379c106927e458ac183375c69ee02 100644
--- a/content/browser/download/parallel_download_job_unittest.cc
+++ b/content/browser/download/parallel_download_job_unittest.cc
@@ -77,7 +77,6 @@ class ParallelDownloadJobForTest : public ParallelDownloadJob {
 class ParallelDownloadJobTest : public testing::Test {
  public:
   void CreateParallelJob(int64_t initial_request_offset,
-                         int64_t initial_request_length,
                          int64_t content_length,
                          const DownloadItem::ReceivedSlices& slices,
                          int request_count,
@@ -87,7 +86,6 @@ class ParallelDownloadJobTest : public testing::Test {
         item_delegate_.get(), slices);
     DownloadCreateInfo info;
     info.offset = initial_request_offset;
-    info.length = initial_request_length;
     info.total_bytes = content_length;
     std::unique_ptr<MockDownloadRequestHandle> request_handle =
         base::MakeUnique<MockDownloadRequestHandle>();
@@ -141,7 +139,7 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithoutSlices) {
   // Totally 2 requests for 100 bytes.
   // Original request: Range:0-49, for 50 bytes.
   // Task 1: Range:50-, for 50 bytes.
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1);
   BuildParallelRequests();
   EXPECT_EQ(1, static_cast<int>(job_->workers().size()));
   VerifyWorker(50, 0);
@@ -151,7 +149,7 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithoutSlices) {
   // Original request: Range:0-32, for 33 bytes.
   // Task 1: Range:33-65, for 33 bytes.
   // Task 2: Range:66-, for 34 bytes.
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 3, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 3, 1);
   BuildParallelRequests();
   EXPECT_EQ(2, static_cast<int>(job_->workers().size()));
   VerifyWorker(33, 33);
@@ -159,18 +157,18 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithoutSlices) {
   DestroyParallelJob();
 
   // Less than 2 requests, do nothing.
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 1, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 1, 1);
   BuildParallelRequests();
   EXPECT_TRUE(job_->workers().empty());
   DestroyParallelJob();
 
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 0, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 0, 1);
   BuildParallelRequests();
   EXPECT_TRUE(job_->workers().empty());
   DestroyParallelJob();
 
   // Content-length is 0, do nothing.
-  CreateParallelJob(0, 0, 0, DownloadItem::ReceivedSlices(), 3, 1);
+  CreateParallelJob(0, 0, DownloadItem::ReceivedSlices(), 3, 1);
   BuildParallelRequests();
   EXPECT_TRUE(job_->workers().empty());
   DestroyParallelJob();
@@ -185,7 +183,7 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithSlices) {
   // Task 1: Range:44-70, for 27 bytes.
   // Task 2: Range:71-, for 29 bytes.
   DownloadItem::ReceivedSlices slices = {DownloadItem::ReceivedSlice(0, 17)};
-  CreateParallelJob(12, 0, 88, slices, 3, 1);
+  CreateParallelJob(12, 88, slices, 3, 1);
   BuildParallelRequests();
   EXPECT_EQ(2, static_cast<int>(job_->workers().size()));
   VerifyWorker(44, 27);
@@ -199,7 +197,7 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithSlices) {
   // Original request: Range:98-99, for 1 bytes.
   // Task 1: Range:99-, for 1 bytes.
   slices = {DownloadItem::ReceivedSlice(0, 98)};
-  CreateParallelJob(98, 0, 2, slices, 4, 1);
+  CreateParallelJob(98, 2, slices, 4, 1);
   BuildParallelRequests();
   EXPECT_EQ(1, static_cast<int>(job_->workers().size()));
   VerifyWorker(99, 0);
@@ -207,7 +205,7 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithSlices) {
 
   // Content-Length is 0, no additional requests.
   slices = {DownloadItem::ReceivedSlice(0, 100)};
-  CreateParallelJob(100, 0, 0, slices, 3, 1);
+  CreateParallelJob(100, 0, slices, 3, 1);
   BuildParallelRequests();
   EXPECT_TRUE(job_->workers().empty());
   DestroyParallelJob();
@@ -220,7 +218,7 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithSlices) {
   slices = {
       DownloadItem::ReceivedSlice(10, 10), DownloadItem::ReceivedSlice(20, 10),
       DownloadItem::ReceivedSlice(40, 10), DownloadItem::ReceivedSlice(90, 10)};
-  CreateParallelJob(0, 0, 12, slices, 2, 1);
+  CreateParallelJob(0, 12, slices, 2, 1);
   BuildParallelRequests();
   EXPECT_EQ(3, static_cast<int>(job_->workers().size()));
   VerifyWorker(30, 10);
@@ -229,31 +227,6 @@ TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithSlices) {
   DestroyParallelJob();
 }
 
-// Ensure the holes before the initial request offset is patched up with
-// parallel requests.
-// This may happen when the previous session is non-parallel but the new
-// session is parallel, and etag doesn't change.
-TEST_F(ParallelDownloadJobTest, CreateNewRequestsIncorrectInitOffset) {
-  // Although we can parallel 4 requests, but we find 2 holes, so just patch
-  // them up with 2 requests.
-  // The offset of 2 slices to download are before the initial request offset.
-  DownloadItem::ReceivedSlices slices = {DownloadItem::ReceivedSlice(40, 5)};
-  CreateParallelJob(50, 0, 50, slices, 4, 1);
-  BuildParallelRequests();
-  EXPECT_EQ(2, static_cast<int>(job_->workers().size()));
-  VerifyWorker(0, 40);
-  VerifyWorker(45, 0);
-  DestroyParallelJob();
-
-  // There is one slice to download before initial request offset, so we just
-  // build one request.
-  CreateParallelJob(50, 0, 50, DownloadItem::ReceivedSlices(), 4, 1);
-  BuildParallelRequests();
-  EXPECT_EQ(1, static_cast<int>(job_->workers().size()));
-  VerifyWorker(0, 0);
-  DestroyParallelJob();
-}
-
 // Pause, cancel, resume can be called before or after the worker establish
 // the byte stream.
 // These tests ensure the states consistency between the job and workers.
@@ -261,7 +234,7 @@ TEST_F(ParallelDownloadJobTest, CreateNewRequestsIncorrectInitOffset) {
 // Ensure cancel before building the requests will result in no requests are
 // built.
 TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeBuildRequests) {
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1);
   EXPECT_CALL(*mock_request_handle_, CancelRequest());
 
   // Job is canceled before building parallel requests.
@@ -277,7 +250,7 @@ TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeBuildRequests) {
 // Ensure cancel before adding the byte stream will result in workers being
 // canceled.
 TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeByteStreamReady) {
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1);
   EXPECT_CALL(*mock_request_handle_, CancelRequest());
 
   BuildParallelRequests();
@@ -301,7 +274,7 @@ TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeByteStreamReady) {
 // Ensure pause before adding the byte stream will result in workers being
 // paused.
 TEST_F(ParallelDownloadJobTest, EarlyPauseBeforeByteStreamReady) {
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1);
   EXPECT_CALL(*mock_request_handle_, PauseRequest());
 
   BuildParallelRequests();