Chromium Code Reviews

Unified diff: content/browser/download/parallel_download_job_unittest.cc (removed lines are prefixed with "-", added lines with "+")

Issue 2789623005: Add UMA metric to track parallel download requests stats. (Closed)
Patch Set: Work on feedback. Created 3 years, 8 months ago
 // Copyright 2017 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "content/browser/download/parallel_download_job.h"
 
 #include <utility>
 #include <vector>
 
 #include "base/memory/ptr_util.h"
(...skipping 59 matching lines...)
 
  private:
   int request_count_;
   int min_slice_size_;
   DISALLOW_COPY_AND_ASSIGN(ParallelDownloadJobForTest);
 };
 
 class ParallelDownloadJobTest : public testing::Test {
  public:
   void CreateParallelJob(int64_t initial_request_offset,
-                         int64_t initial_request_length,
                          int64_t content_length,
                          const DownloadItem::ReceivedSlices& slices,
                          int request_count,
                          int64_t min_slice_size) {
     item_delegate_ = base::MakeUnique<DownloadItemImplDelegate>();
     download_item_ = base::MakeUnique<NiceMock<MockDownloadItemImpl>>(
         item_delegate_.get(), slices);
     DownloadCreateInfo info;
     info.offset = initial_request_offset;
-    info.length = initial_request_length;
     info.total_bytes = content_length;
     std::unique_ptr<MockDownloadRequestHandle> request_handle =
         base::MakeUnique<MockDownloadRequestHandle>();
     mock_request_handle_ = request_handle.get();
     job_ = base::MakeUnique<ParallelDownloadJobForTest>(
         download_item_.get(), std::move(request_handle), info, request_count,
         min_slice_size);
   }
 
   void DestroyParallelJob() {
(...skipping 33 matching lines...)
   // Request handle for the original request.
   MockDownloadRequestHandle* mock_request_handle_;
 };
 
 // Test if parallel requests can be built correctly for a new download without
 // existing slices.
 TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithoutSlices) {
   // Totally 2 requests for 100 bytes.
   // Original request: Range:0-49, for 50 bytes.
   // Task 1: Range:50-, for 50 bytes.
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1);
   BuildParallelRequests();
   EXPECT_EQ(1, static_cast<int>(job_->workers().size()));
   VerifyWorker(50, 0);
   DestroyParallelJob();
 
   // Totally 3 requests for 100 bytes.
   // Original request: Range:0-32, for 33 bytes.
   // Task 1: Range:33-65, for 33 bytes.
   // Task 2: Range:66-, for 34 bytes.
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 3, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 3, 1);
   BuildParallelRequests();
   EXPECT_EQ(2, static_cast<int>(job_->workers().size()));
   VerifyWorker(33, 33);
   VerifyWorker(66, 0);
   DestroyParallelJob();
 
   // Less than 2 requests, do nothing.
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 1, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 1, 1);
   BuildParallelRequests();
   EXPECT_TRUE(job_->workers().empty());
   DestroyParallelJob();
 
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 0, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 0, 1);
   BuildParallelRequests();
   EXPECT_TRUE(job_->workers().empty());
   DestroyParallelJob();
 
   // Content-length is 0, do nothing.
-  CreateParallelJob(0, 0, 0, DownloadItem::ReceivedSlices(), 3, 1);
+  CreateParallelJob(0, 0, DownloadItem::ReceivedSlices(), 3, 1);
   BuildParallelRequests();
   EXPECT_TRUE(job_->workers().empty());
   DestroyParallelJob();
 }
 
 TEST_F(ParallelDownloadJobTest, CreateNewDownloadRequestsWithSlices) {
   // File size: 100 bytes.
   // Received slices: [0, 17]
   // Original request: Range:12-. Content-length: 88.
   // Totally 3 requests for 83 bytes.
   // Original request: Range:12-43.
   // Task 1: Range:44-70, for 27 bytes.
   // Task 2: Range:71-, for 29 bytes.
   DownloadItem::ReceivedSlices slices = {DownloadItem::ReceivedSlice(0, 17)};
-  CreateParallelJob(12, 0, 88, slices, 3, 1);
+  CreateParallelJob(12, 88, slices, 3, 1);
   BuildParallelRequests();
   EXPECT_EQ(2, static_cast<int>(job_->workers().size()));
   VerifyWorker(44, 27);
   VerifyWorker(71, 0);
   DestroyParallelJob();
 
   // File size: 100 bytes.
   // Received slices: [0, 98], Range:0-97.
   // Original request: Range:98-. Content-length: 2.
   // 2 bytes left for 4 requests. Only 1 additional request.
   // Original request: Range:98-99, for 1 bytes.
   // Task 1: Range:99-, for 1 bytes.
   slices = {DownloadItem::ReceivedSlice(0, 98)};
-  CreateParallelJob(98, 0, 2, slices, 4, 1);
+  CreateParallelJob(98, 2, slices, 4, 1);
   BuildParallelRequests();
   EXPECT_EQ(1, static_cast<int>(job_->workers().size()));
   VerifyWorker(99, 0);
   DestroyParallelJob();
 
   // Content-Length is 0, no additional requests.
   slices = {DownloadItem::ReceivedSlice(0, 100)};
-  CreateParallelJob(100, 0, 0, slices, 3, 1);
+  CreateParallelJob(100, 0, slices, 3, 1);
   BuildParallelRequests();
   EXPECT_TRUE(job_->workers().empty());
   DestroyParallelJob();
 
   // File size: 100 bytes.
   // Original request: Range:0-. Content-length: 12(Incorrect server header).
   // The request count is 2, however the file contains 3 holes, and we don't
   // know if the last slice is completed, so there should be 3 requests in
   // parallel and the last request is an out-of-range request.
   slices = {
       DownloadItem::ReceivedSlice(10, 10), DownloadItem::ReceivedSlice(20, 10),
       DownloadItem::ReceivedSlice(40, 10), DownloadItem::ReceivedSlice(90, 10)};
-  CreateParallelJob(0, 0, 12, slices, 2, 1);
+  CreateParallelJob(0, 12, slices, 2, 1);
   BuildParallelRequests();
   EXPECT_EQ(3, static_cast<int>(job_->workers().size()));
   VerifyWorker(30, 10);
   VerifyWorker(50, 40);
   VerifyWorker(100, 0);
   DestroyParallelJob();
 }
 
-// Ensure the holes before the initial request offset is patched up with
-// parallel requests.
-// This may happen when the previous session is non-parallel but the new
-// session is parallel, and etag doesn't change.
-TEST_F(ParallelDownloadJobTest, CreateNewRequestsIncorrectInitOffset) {
-  // Although we can parallel 4 requests, but we find 2 holes, so just patch
-  // them up with 2 requests.
-  // The offset of 2 slices to download are before the initial request offset.
-  DownloadItem::ReceivedSlices slices = {DownloadItem::ReceivedSlice(40, 5)};
-  CreateParallelJob(50, 0, 50, slices, 4, 1);
-  BuildParallelRequests();
-  EXPECT_EQ(2, static_cast<int>(job_->workers().size()));
-  VerifyWorker(0, 40);
-  VerifyWorker(45, 0);
-  DestroyParallelJob();
-
-  // There is one slice to download before initial request offset, so we just
-  // build one request.
-  CreateParallelJob(50, 0, 50, DownloadItem::ReceivedSlices(), 4, 1);
-  BuildParallelRequests();
-  EXPECT_EQ(1, static_cast<int>(job_->workers().size()));
-  VerifyWorker(0, 0);
-  DestroyParallelJob();
-}
-
 // Pause, cancel, resume can be called before or after the worker establish
 // the byte stream.
 // These tests ensure the states consistency between the job and workers.
 
 // Ensure cancel before building the requests will result in no requests are
 // built.
 TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeBuildRequests) {
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1);
   EXPECT_CALL(*mock_request_handle_, CancelRequest());
 
   // Job is canceled before building parallel requests.
   job_->Cancel(true);
   EXPECT_TRUE(IsJobCanceled());
 
   BuildParallelRequests();
   EXPECT_TRUE(job_->workers().empty());
 
   DestroyParallelJob();
 }
 
 // Ensure cancel before adding the byte stream will result in workers being
 // canceled.
 TEST_F(ParallelDownloadJobTest, EarlyCancelBeforeByteStreamReady) {
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1);
   EXPECT_CALL(*mock_request_handle_, CancelRequest());
 
   BuildParallelRequests();
   VerifyWorker(50, 0);
 
   // Job is canceled after building parallel requests and before byte streams
   // are added to the file sink.
   job_->Cancel(true);
   EXPECT_TRUE(IsJobCanceled());
 
   for (auto& worker : job_->workers()) {
     std::unique_ptr<MockDownloadRequestHandle> mock_handle =
         base::MakeUnique<MockDownloadRequestHandle>();
     EXPECT_CALL(*mock_handle.get(), CancelRequest());
     MakeWorkerReady(worker.second.get(), std::move(mock_handle));
   }
 
   DestroyParallelJob();
 }
 
 // Ensure pause before adding the byte stream will result in workers being
 // paused.
 TEST_F(ParallelDownloadJobTest, EarlyPauseBeforeByteStreamReady) {
-  CreateParallelJob(0, 0, 100, DownloadItem::ReceivedSlices(), 2, 1);
+  CreateParallelJob(0, 100, DownloadItem::ReceivedSlices(), 2, 1);
   EXPECT_CALL(*mock_request_handle_, PauseRequest());
 
   BuildParallelRequests();
   VerifyWorker(50, 0);
 
   // Job is paused after building parallel requests and before adding the byte
   // stream to the file sink.
   job_->Pause();
   EXPECT_TRUE(job_->is_paused());
 
   for (auto& worker : job_->workers()) {
     EXPECT_CALL(*job_.get(), CountOnByteStreamReady());
     std::unique_ptr<MockDownloadRequestHandle> mock_handle =
         base::MakeUnique<MockDownloadRequestHandle>();
     EXPECT_CALL(*mock_handle.get(), PauseRequest());
     MakeWorkerReady(worker.second.get(), std::move(mock_handle));
   }
 
   DestroyParallelJob();
 }
 
 }  // namespace content
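
Note on the expected worker ranges: CreateNewDownloadRequestsWithoutSlices asserts a simple even split of the content across the configured number of requests, with the last range left open-ended (length 0). The standalone sketch below reproduces that arithmetic only for illustration; SplitEvenly, Slice, and their signatures are hypothetical stand-ins, not the helpers ParallelDownloadJob actually uses.

// A minimal sketch (not the Chromium implementation) of the splitting
// arithmetic described in the test comments: content_length bytes are
// divided across request_count requests, each slice gets roughly
// content_length / request_count bytes, and the last slice is open-ended
// (length 0) so it absorbs any remainder. Fewer than 2 requests or a zero
// content length yields no slices, mirroring the "do nothing" cases above.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Slice {
  int64_t offset;
  int64_t length;  // 0 means "until the end of the file".
};

std::vector<Slice> SplitEvenly(int64_t content_length,
                               int request_count,
                               int64_t min_slice_size) {
  std::vector<Slice> slices;
  if (content_length <= 0 || request_count < 2)
    return slices;
  int64_t slice_size =
      std::max(content_length / request_count, min_slice_size);
  int64_t offset = 0;
  for (int i = 0; i < request_count && offset < content_length; ++i) {
    bool is_last =
        (i == request_count - 1) || (offset + slice_size >= content_length);
    slices.push_back({offset, is_last ? 0 : slice_size});
    if (is_last)
      break;
    offset += slice_size;
  }
  return slices;
}

int main() {
  // 100 bytes across 3 requests -> {0, 33}, {33, 33}, {66, 0}, matching the
  // "Range:0-32 / 33-65 / 66-" comment; the first slice corresponds to the
  // original request and the remaining slices become workers.
  for (const Slice& s : SplitEvenly(100, 3, 1))
    std::cout << s.offset << " " << s.length << "\n";
  return 0;
}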
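The expectations in CreateNewDownloadRequestsWithSlices, including the "out-of-range" last request, follow from walking the sorted received slices and collecting the gaps between them, leaving the final request open-ended because the job cannot tell whether the last received slice already reaches the end of the file. A minimal sketch of that gap-finding step, again with illustrative names (FindHoles, Hole) rather than Chromium's API:

// Sketch only: derive the byte ranges still missing from a sorted list of
// received slices, then add one trailing open-ended request after the last
// received slice.
#include <cstdint>
#include <iostream>
#include <vector>

struct ReceivedSlice {
  int64_t offset;
  int64_t received_bytes;
};

struct Hole {
  int64_t offset;
  int64_t length;  // 0 means open-ended.
};

std::vector<Hole> FindHoles(const std::vector<ReceivedSlice>& received) {
  std::vector<Hole> holes;
  int64_t current = 0;
  for (const ReceivedSlice& slice : received) {
    if (slice.offset > current)
      holes.push_back({current, slice.offset - current});
    current = slice.offset + slice.received_bytes;
  }
  // One trailing open-ended request; it may turn out to be out-of-range if
  // the file was already complete, as the last test case notes.
  holes.push_back({current, 0});
  return holes;
}

int main() {
  // Received [10,20), [20,30), [40,50), [90,100) -> holes at 0 (10 bytes),
  // 30 (10 bytes), 50 (40 bytes) and an open-ended request at 100. The first
  // hole is covered by the original request (offset 0); the rest match
  // VerifyWorker(30, 10), VerifyWorker(50, 40) and VerifyWorker(100, 0).
  std::vector<ReceivedSlice> received = {
      {10, 10}, {20, 10}, {40, 10}, {90, 10}};
  for (const Hole& h : FindHoles(received))
    std::cout << h.offset << " " << h.length << "\n";
  return 0;
}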
