OLD | NEW |
1 // Copyright 2017 The Chromium Authors. All rights reserved. | 1 // Copyright 2017 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/browser/download/parallel_download_job.h" | 5 #include "content/browser/download/parallel_download_job.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 #include <vector> | 8 #include <vector> |
9 | 9 |
10 #include "base/memory/ptr_util.h" | 10 #include "base/memory/ptr_util.h" |
(...skipping 21 matching lines...) |
32 }; | 32 }; |
33 | 33 |
34 } // namespace | 34 } // namespace |
35 | 35 |
36 class ParallelDownloadJobForTest : public ParallelDownloadJob { | 36 class ParallelDownloadJobForTest : public ParallelDownloadJob { |
37 public: | 37 public: |
38 ParallelDownloadJobForTest( | 38 ParallelDownloadJobForTest( |
39 DownloadItemImpl* download_item, | 39 DownloadItemImpl* download_item, |
40 std::unique_ptr<DownloadRequestHandleInterface> request_handle, | 40 std::unique_ptr<DownloadRequestHandleInterface> request_handle, |
41 const DownloadCreateInfo& create_info, | 41 const DownloadCreateInfo& create_info, |
42 int request_count) | 42 int request_count, |
| 43 int64_t min_slice_size) |
43 : ParallelDownloadJob(download_item, | 44 : ParallelDownloadJob(download_item, |
44 std::move(request_handle), | 45 std::move(request_handle), |
45 create_info), | 46 create_info), |
46 request_count_(request_count) {} | 47 request_count_(request_count), |
| 48 min_slice_size_(min_slice_size) {} |
47 | 49 |
48 void CreateRequest(int64_t offset, int64_t length) override { | 50 void CreateRequest(int64_t offset, int64_t length) override { |
49 std::unique_ptr<DownloadWorker> worker = | 51 std::unique_ptr<DownloadWorker> worker = |
50 base::MakeUnique<DownloadWorker>(this, offset, length); | 52 base::MakeUnique<DownloadWorker>(this, offset, length); |
51 | 53 |
52 DCHECK(workers_.find(offset) == workers_.end()); | 54 DCHECK(workers_.find(offset) == workers_.end()); |
53 workers_[offset] = std::move(worker); | 55 workers_[offset] = std::move(worker); |
54 } | 56 } |
55 | 57 |
56 ParallelDownloadJob::WorkerMap& workers() { return workers_; } | 58 ParallelDownloadJob::WorkerMap& workers() { return workers_; } |
57 | 59 |
58 int GetParallelRequestCount() const override { return request_count_; } | 60 int GetParallelRequestCount() const override { return request_count_; } |
| 61 int64_t GetMinSliceSize() const override { return min_slice_size_; } |
59 | 62 |
60 void OnByteStreamReady( | 63 void OnByteStreamReady( |
61 DownloadWorker* worker, | 64 DownloadWorker* worker, |
62 std::unique_ptr<ByteStreamReader> stream_reader) override { | 65 std::unique_ptr<ByteStreamReader> stream_reader) override { |
63 CountOnByteStreamReady(); | 66 CountOnByteStreamReady(); |
64 } | 67 } |
65 | 68 |
66 MOCK_METHOD0(CountOnByteStreamReady, void()); | 69 MOCK_METHOD0(CountOnByteStreamReady, void()); |
67 | 70 |
68 private: | 71 private: |
69 int request_count_; | 72 int request_count_; |
| 73 int64_t min_slice_size_; |
70 DISALLOW_COPY_AND_ASSIGN(ParallelDownloadJobForTest); | 74 DISALLOW_COPY_AND_ASSIGN(ParallelDownloadJobForTest); |
71 }; | 75 }; |
72 | 76 |
73 class ParallelDownloadJobTest : public testing::Test { | 77 class ParallelDownloadJobTest : public testing::Test { |
74 public: | 78 public: |
75 void CreateParallelJob(int64_t offset, | 79 void CreateParallelJob(int64_t initial_request_offset, |
| 80 int64_t initial_request_length, |
76 int64_t content_length, | 81 int64_t content_length, |
77 const DownloadItem::ReceivedSlices& slices, | 82 const DownloadItem::ReceivedSlices& slices, |
78 int request_count) { | 83 int request_count, |
| 84 int64_t min_slice_size) { |
79 item_delegate_ = base::MakeUnique<DownloadItemImplDelegate>(); | 85 item_delegate_ = base::MakeUnique<DownloadItemImplDelegate>(); |
80 download_item_ = base::MakeUnique<NiceMock<MockDownloadItemImpl>>( | 86 download_item_ = base::MakeUnique<NiceMock<MockDownloadItemImpl>>( |
81 item_delegate_.get(), slices); | 87 item_delegate_.get(), slices); |
82 DownloadCreateInfo info; | 88 DownloadCreateInfo info; |
83 info.offset = offset; | 89 info.offset = initial_request_offset; |
| 90 info.length = initial_request_length; |
84 info.total_bytes = content_length; | 91 info.total_bytes = content_length; |
85 std::unique_ptr<MockDownloadRequestHandle> request_handle = | 92 std::unique_ptr<MockDownloadRequestHandle> request_handle = |
86 base::MakeUnique<MockDownloadRequestHandle>(); | 93 base::MakeUnique<MockDownloadRequestHandle>(); |
87 mock_request_handle_ = request_handle.get(); | 94 mock_request_handle_ = request_handle.get(); |
88 job_ = base::MakeUnique<ParallelDownloadJobForTest>( | 95 job_ = base::MakeUnique<ParallelDownloadJobForTest>( |
89 download_item_.get(), std::move(request_handle), info, request_count); | 96 download_item_.get(), std::move(request_handle), info, request_count, |
| 97 min_slice_size); |
90 } | 98 } |
91 | 99 |
92 void DestroyParallelJob() { | 100 void DestroyParallelJob() { |
93 job_.reset(); | 101 job_.reset(); |
94 download_item_.reset(); | 102 download_item_.reset(); |
95 item_delegate_.reset(); | 103 item_delegate_.reset(); |
96 mock_request_handle_ = nullptr; | 104 mock_request_handle_ = nullptr; |
97 } | 105 } |
98 | 106 |
99 void BuildParallelRequests() { job_->BuildParallelRequests(); } | 107 void BuildParallelRequests() { job_->BuildParallelRequests(); } |
(...skipping 20 matching lines...) |
120 } | 128 } |
121 | 129 |
122 content::TestBrowserThreadBundle browser_threads_; | 130 content::TestBrowserThreadBundle browser_threads_; |
123 std::unique_ptr<DownloadItemImplDelegate> item_delegate_; | 131 std::unique_ptr<DownloadItemImplDelegate> item_delegate_; |
124 std::unique_ptr<MockDownloadItemImpl> download_item_; | 132 std::unique_ptr<MockDownloadItemImpl> download_item_; |
125 std::unique_ptr<ParallelDownloadJobForTest> job_; | 133 std::unique_ptr<ParallelDownloadJobForTest> job_; |
126 // Request handle for the original request. | 134 // Request handle for the original request. |
127 MockDownloadRequestHandle* mock_request_handle_; | 135 MockDownloadRequestHandle* mock_request_handle_; |
128 }; | 136 }; |
129 | 137 |
130 // Test if parallel requests can be built correctly for a new download. | 138 // Test if parallel requests can be built correctly for a new download without |
131 TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequests) { | 139 // existing slices. |
| 140 TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithoutSlices) { |
132 // In total, 2 requests for 100 bytes. | 141 // In total, 2 requests for 100 bytes. |
133 // Original request: Range:0-49, for 50 bytes. | 142 // Original request: Range:0-49, for 50 bytes. |
134 // Task 1: Range:50-, for 50 bytes. | 143 // Task 1: Range:50-, for 50 bytes. |
135 CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2); | 144 CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1); |
136 BuildParallelRequests(); | 145 BuildParallelRequests(); |
137 EXPECT_EQ(1, static_cast<int>(job_->workers().size())); | 146 EXPECT_EQ(1, static_cast<int>(job_->workers().size())); |
138 VerifyWorker(50, 0); | 147 VerifyWorker(50, 0); |
139 DestroyParallelJob(); | 148 DestroyParallelJob(); |
140 | 149 |
141 // In total, 3 requests for 100 bytes. | 150 // In total, 3 requests for 100 bytes. |
142 // Original request: Range:0-32, for 33 bytes. | 151 // Original request: Range:0-32, for 33 bytes. |
143 // Task 1: Range:33-65, for 33 bytes. | 152 // Task 1: Range:33-65, for 33 bytes. |
144 // Task 2: Range:66-, for 34 bytes. | 153 // Task 2: Range:66-, for 34 bytes. |
145 CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 3); | 154 CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 3, 1); |
146 BuildParallelRequests(); | 155 BuildParallelRequests(); |
147 EXPECT_EQ(2, static_cast<int>(job_->workers().size())); | 156 EXPECT_EQ(2, static_cast<int>(job_->workers().size())); |
148 VerifyWorker(33, 33); | 157 VerifyWorker(33, 33); |
149 VerifyWorker(66, 0); | 158 VerifyWorker(66, 0); |
150 DestroyParallelJob(); | 159 DestroyParallelJob(); |
151 | 160 |
152 // Totally 3 requests for 100 bytes. Start from the 17th byte. | 161 // Less than 2 requests, do nothing. |
153 // Original request: Range:17-43, for 27 bytes. | 162 CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 1, 1); |
| 163 BuildParallelRequests(); |
| 164 EXPECT_TRUE(job_->workers().empty()); |
| 165 DestroyParallelJob(); |
| 166 |
| 167 CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 0, 1); |
| 168 BuildParallelRequests(); |
| 169 EXPECT_TRUE(job_->workers().empty()); |
| 170 DestroyParallelJob(); |
| 171 |
| 172 // Content-length is 0, do nothing. |
| 173 CreateParallelJob(0, 0, 0, DownloadItem::ReceivedSlices(), 3, 1); |
| 174 BuildParallelRequests(); |
| 175 EXPECT_TRUE(job_->workers().empty()); |
| 176 DestroyParallelJob(); |
| 177 } |
| 178 |
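
The expectations above encode a simple even split of the remaining bytes: the original request keeps the first slice, each additional worker starts at a multiple of the slice size, and the last worker is left open-ended. Below is a minimal, self-contained sketch of that arithmetic; SliceRequest and SplitEvenly are hypothetical names used only for illustration, not the actual ParallelDownloadJob implementation.

// Illustrative sketch only; these names are not part of content/browser/download.
#include <algorithm>
#include <cstdint>
#include <vector>

struct SliceRequest {
  int64_t offset;
  int64_t length;  // 0 means "until the end of the file".
};

// Splits |total_bytes| starting at |offset| into |request_count| roughly
// equal slices, respecting |min_slice_size|. The first slice stays with the
// original request, so at most request_count - 1 additional requests are
// returned; the final one is open-ended.
std::vector<SliceRequest> SplitEvenly(int64_t offset,
                                      int64_t total_bytes,
                                      int request_count,
                                      int64_t min_slice_size) {
  std::vector<SliceRequest> requests;
  if (request_count < 2 || total_bytes <= 0)
    return requests;
  int64_t slice_size = std::max(total_bytes / request_count, min_slice_size);
  for (int i = 1; i < request_count; ++i) {
    int64_t slice_offset = offset + i * slice_size;
    if (slice_offset >= offset + total_bytes)
      break;
    bool is_last = (i == request_count - 1);
    requests.push_back({slice_offset, is_last ? 0 : slice_size});
  }
  return requests;
}

For example, SplitEvenly(0, 100, 3, 1) yields {33, 33} and {66, 0}, matching VerifyWorker(33, 33) and VerifyWorker(66, 0) above; SplitEvenly(0, 100, 2, 1) yields only {50, 0}.
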
| 179 TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithSlices) { |
| 180 // File size: 100 bytes. |
| 181 // Received slices: [0, 17] |
| 182 // Original request: Range:12-. Content-length: 88. |
| 183 // In total, 3 requests for the remaining 83 bytes. |
| 184 // Original request: Range:12-43. |
154 // Task 1: Range:44-70, for 27 bytes. | 185 // Task 1: Range:44-70, for 27 bytes. |
155 // Task 2: Range:71-99, for 29 bytes. | 186 // Task 2: Range:71-, for 29 bytes. |
156 CreateParallelJob(17, 83, DownloadItem::ReceivedSlices(), 3); | 187 DownloadItem::ReceivedSlices slices = {DownloadItem::ReceivedSlice(0, 17)}; |
| 188 CreateParallelJob(12, 0, 88, slices, 3, 1); |
157 BuildParallelRequests(); | 189 BuildParallelRequests(); |
158 EXPECT_EQ(2, static_cast<int>(job_->workers().size())); | 190 EXPECT_EQ(2, static_cast<int>(job_->workers().size())); |
159 VerifyWorker(44, 27); | 191 VerifyWorker(44, 27); |
160 VerifyWorker(71, 0); | 192 VerifyWorker(71, 0); |
161 DestroyParallelJob(); | 193 DestroyParallelJob(); |
162 | 194 |
163 // Less than 2 requests, do nothing. | 195 // File size: 100 bytes. |
164 CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 1); | 196 // Received slices: [0, 98], Range:0-97. |
| 197 // Original request: Range:98-. Content-length: 2. |
| 198 // 2 bytes left for 4 requests. Only 1 additional request. |
| 199 // Original request: Range:98-98, for 1 byte. |
| 200 // Task 1: Range:99-, for 1 byte. |
| 201 slices = {DownloadItem::ReceivedSlice(0, 98)}; |
| 202 CreateParallelJob(98, 0, 2, slices, 4, 1); |
| 203 BuildParallelRequests(); |
| 204 EXPECT_EQ(1, static_cast<int>(job_->workers().size())); |
| 205 VerifyWorker(99, 0); |
| 206 DestroyParallelJob(); |
| 207 |
| 208 // Content-Length is 0, no additional requests. |
| 209 slices = {DownloadItem::ReceivedSlice(0, 100)}; |
| 210 CreateParallelJob(100, 0, 0, slices, 3, 1); |
165 BuildParallelRequests(); | 211 BuildParallelRequests(); |
166 EXPECT_TRUE(job_->workers().empty()); | 212 EXPECT_TRUE(job_->workers().empty()); |
167 DestroyParallelJob(); | 213 DestroyParallelJob(); |
168 | 214 |
169 CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 0); | 215 // File size: 100 bytes. |
| 216 // Original request: Range:0-. Content-length: 12 (incorrect server header). |
| 217 // The request count is 2, but the file contains 3 holes, and we don't know |
| 218 // whether the last slice is complete, so there should be 3 requests in |
| 219 // parallel and the last request is an out-of-range request. |
| 220 slices = { |
| 221 DownloadItem::ReceivedSlice(10, 10), DownloadItem::ReceivedSlice(20, 10), |
| 222 DownloadItem::ReceivedSlice(40, 10), DownloadItem::ReceivedSlice(90, 10)}; |
| 223 CreateParallelJob(0, 0, 12, slices, 2, 1); |
170 BuildParallelRequests(); | 224 BuildParallelRequests(); |
171 EXPECT_TRUE(job_->workers().empty()); | 225 EXPECT_EQ(3, static_cast<int>(job_->workers().size())); |
172 DestroyParallelJob(); | 226 VerifyWorker(30, 10); |
173 | 227 VerifyWorker(50, 40); |
174 // Content-length is 0, do nothing. | 228 VerifyWorker(100, 0); |
175 CreateParallelJob(100, 0, DownloadItem::ReceivedSlices(), 3); | |
176 BuildParallelRequests(); | |
177 EXPECT_TRUE(job_->workers().empty()); | |
178 DestroyParallelJob(); | |
179 | |
180 CreateParallelJob(0, 0, DownloadItem::ReceivedSlices(), 3); | |
181 BuildParallelRequests(); | |
182 EXPECT_TRUE(job_->workers().empty()); | |
183 DestroyParallelJob(); | |
184 | |
185 // 2 bytes left for 3 additional requests. Only 1 are built. | |
186 // Original request: Range:98-98, for 1 byte. | |
187 // Task 1: Range:99-, for 1 byte. | |
188 CreateParallelJob(98, 2, DownloadItem::ReceivedSlices(), 4); | |
189 BuildParallelRequests(); | |
190 EXPECT_EQ(1, static_cast<int>(job_->workers().size())); | |
191 VerifyWorker(99, 0); | |
192 DestroyParallelJob(); | 229 DestroyParallelJob(); |
193 } | 230 } |
194 | 231 |
| 232 // Ensure the holes before the initial request offset are patched up with |
| 233 // parallel requests. |
| 234 // This may happen when the previous session was non-parallel but the new |
| 235 // session is parallel, and the ETag hasn't changed. |
| 236 TEST_F(ParallelDownloadJobTest, CreateNewRequestsIncorrectInitOffset) { |
| 237 // Although we can issue 4 parallel requests, there are only 2 holes, so we |
| 238 // just patch them up with 2 requests. |
| 239 // The offsets of both slices to download precede the initial request offset. |
| 240 DownloadItem::ReceivedSlices slices = {DownloadItem::ReceivedSlice(40, 5)}; |
| 241 CreateParallelJob(50, 0, 50, slices, 4, 1); |
| 242 BuildParallelRequests(); |
| 243 EXPECT_EQ(2, static_cast<int>(job_->workers().size())); |
| 244 VerifyWorker(0, 40); |
| 245 VerifyWorker(45, 0); |
| 246 DestroyParallelJob(); |
| 247 |
| 248 // There is only one slice to download before the initial request offset, so |
| 249 // we just build one request. |
| 250 CreateParallelJob(50, 0, 50, DownloadItem::ReceivedSlices(), 4, 1); |
| 251 BuildParallelRequests(); |
| 252 EXPECT_EQ(1, static_cast<int>(job_->workers().size())); |
| 253 VerifyWorker(0, 0); |
| 254 DestroyParallelJob(); |
| 255 } |
| 256 |
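
The WithSlices and IncorrectInitOffset expectations above reduce to finding the gaps between already-received slices, plus one open-ended tail request because the job cannot assume the last received slice reached the end of the file. A minimal, self-contained sketch of that gap computation follows; Received, Gap, and FindGaps are hypothetical names for illustration, not the real Chromium types.

// Illustrative sketch only; not the actual hole-finding code in Chromium.
#include <cstdint>
#include <vector>

struct Received {
  int64_t offset;
  int64_t received_bytes;
};

struct Gap {
  int64_t offset;
  int64_t length;  // 0 means open-ended.
};

// Given sorted, non-overlapping received slices, returns the byte ranges that
// are still missing. The trailing gap is open-ended because it is unknown
// whether the last received slice actually reached the end of the file.
std::vector<Gap> FindGaps(const std::vector<Received>& slices) {
  std::vector<Gap> gaps;
  int64_t cursor = 0;
  for (const Received& s : slices) {
    if (s.offset > cursor)
      gaps.push_back({cursor, s.offset - cursor});
    cursor = s.offset + s.received_bytes;
  }
  gaps.push_back({cursor, 0});  // Open-ended tail request.
  return gaps;
}

For the slices {(10, 10), (20, 10), (40, 10), (90, 10)} it returns {0, 10}, {30, 10}, {50, 40}, and {100, 0}; the original request covers the first gap and the workers verified above cover the rest.
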
195 // Pause, cancel, and resume can be called before or after the worker | 257 // Pause, cancel, and resume can be called before or after the worker |
196 // establishes the byte stream. | 258 // establishes the byte stream. |
197 // These tests ensure state consistency between the job and its workers. | 259 // These tests ensure state consistency between the job and its workers. |
198 | 260 |
199 // Ensure that canceling before building the requests results in no requests | 261 // Ensure that canceling before building the requests results in no requests |
200 // being built. | 262 // being built. |
201 TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeBuildRequests) { | 263 TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeBuildRequests) { |
202 CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2); | 264 CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1); |
203 EXPECT_CALL(*mock_request_handle_, CancelRequest()); | 265 EXPECT_CALL(*mock_request_handle_, CancelRequest()); |
204 | 266 |
205 // Job is canceled before building parallel requests. | 267 // Job is canceled before building parallel requests. |
206 job_->Cancel(true); | 268 job_->Cancel(true); |
207 EXPECT_TRUE(IsJobCanceled()); | 269 EXPECT_TRUE(IsJobCanceled()); |
208 | 270 |
209 BuildParallelRequests(); | 271 BuildParallelRequests(); |
210 EXPECT_TRUE(job_->workers().empty()); | 272 EXPECT_TRUE(job_->workers().empty()); |
211 | 273 |
212 DestroyParallelJob(); | 274 DestroyParallelJob(); |
213 } | 275 } |
214 | 276 |
215 // Ensure that canceling before adding the byte stream results in the workers | 277 // Ensure that canceling before adding the byte stream results in the workers |
216 // being canceled. | 278 // being canceled. |
217 TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeByteStreamReady) { | 279 TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeByteStreamReady) { |
218 CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2); | 280 CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1); |
219 EXPECT_CALL(*mock_request_handle_, CancelRequest()); | 281 EXPECT_CALL(*mock_request_handle_, CancelRequest()); |
220 | 282 |
221 BuildParallelRequests(); | 283 BuildParallelRequests(); |
222 VerifyWorker(50, 0); | 284 VerifyWorker(50, 0); |
223 | 285 |
224 // Job is canceled after building parallel requests and before byte streams | 286 // Job is canceled after building parallel requests and before byte streams |
225 // are added to the file sink. | 287 // are added to the file sink. |
226 job_->Cancel(true); | 288 job_->Cancel(true); |
227 EXPECT_TRUE(IsJobCanceled()); | 289 EXPECT_TRUE(IsJobCanceled()); |
228 | 290 |
229 for (auto& worker : job_->workers()) { | 291 for (auto& worker : job_->workers()) { |
230 std::unique_ptr<MockDownloadRequestHandle> mock_handle = | 292 std::unique_ptr<MockDownloadRequestHandle> mock_handle = |
231 base::MakeUnique<MockDownloadRequestHandle>(); | 293 base::MakeUnique<MockDownloadRequestHandle>(); |
232 EXPECT_CALL(*mock_handle.get(), CancelRequest()); | 294 EXPECT_CALL(*mock_handle.get(), CancelRequest()); |
233 MakeWorkerReady(worker.second.get(), std::move(mock_handle)); | 295 MakeWorkerReady(worker.second.get(), std::move(mock_handle)); |
234 } | 296 } |
235 | 297 |
236 DestroyParallelJob(); | 298 DestroyParallelJob(); |
237 } | 299 } |
238 | 300 |
239 // Ensure that pausing before adding the byte stream results in the workers | 301 // Ensure that pausing before adding the byte stream results in the workers |
240 // being paused. | 302 // being paused. |
241 TEST_F(ParallelDownloadJobTest, EarlyPauseBeforeByteStreamReady) { | 303 TEST_F(ParallelDownloadJobTest, EarlyPauseBeforeByteStreamReady) { |
242 CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2); | 304 CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1); |
243 EXPECT_CALL(*mock_request_handle_, PauseRequest()); | 305 EXPECT_CALL(*mock_request_handle_, PauseRequest()); |
244 | 306 |
245 BuildParallelRequests(); | 307 BuildParallelRequests(); |
246 VerifyWorker(50, 0); | 308 VerifyWorker(50, 0); |
247 | 309 |
248 // Job is paused after building parallel requests and before adding the byte | 310 // Job is paused after building parallel requests and before adding the byte |
249 // stream to the file sink. | 311 // stream to the file sink. |
250 job_->Pause(); | 312 job_->Pause(); |
251 EXPECT_TRUE(job_->is_paused()); | 313 EXPECT_TRUE(job_->is_paused()); |
252 | 314 |
253 for (auto& worker : job_->workers()) { | 315 for (auto& worker : job_->workers()) { |
254 EXPECT_CALL(*job_.get(), CountOnByteStreamReady()); | 316 EXPECT_CALL(*job_.get(), CountOnByteStreamReady()); |
255 std::unique_ptr<MockDownloadRequestHandle> mock_handle = | 317 std::unique_ptr<MockDownloadRequestHandle> mock_handle = |
256 base::MakeUnique<MockDownloadRequestHandle>(); | 318 base::MakeUnique<MockDownloadRequestHandle>(); |
257 EXPECT_CALL(*mock_handle.get(), PauseRequest()); | 319 EXPECT_CALL(*mock_handle.get(), PauseRequest()); |
258 MakeWorkerReady(worker.second.get(), std::move(mock_handle)); | 320 MakeWorkerReady(worker.second.get(), std::move(mock_handle)); |
259 } | 321 } |
260 | 322 |
261 DestroyParallelJob(); | 323 DestroyParallelJob(); |
262 } | 324 } |
263 | 325 |
264 } // namespace content | 326 } // namespace content |