Chromium Code Reviews| Index: content/browser/download/parallel_download_job.cc |
| diff --git a/content/browser/download/parallel_download_job.cc b/content/browser/download/parallel_download_job.cc |
| index b9df978537da94ee946704a3c3d0cb3d82dfa233..3195315f25f6952b51b1bd69d5f3134ca97f0c5b 100644 |
| --- a/content/browser/download/parallel_download_job.cc |
| +++ b/content/browser/download/parallel_download_job.cc |
| @@ -6,6 +6,7 @@ |
| #include "base/memory/ptr_util.h" |
| #include "content/browser/download/download_create_info.h" |
| +#include "content/browser/download/download_stats.h" |
| #include "content/browser/download/parallel_download_utils.h" |
| #include "content/public/browser/browser_context.h" |
| #include "content/public/browser/storage_partition.h" |
| @@ -23,7 +24,6 @@ ParallelDownloadJob::ParallelDownloadJob( |
| const DownloadCreateInfo& create_info) |
| : DownloadJobImpl(download_item, std::move(request_handle)), |
| initial_request_offset_(create_info.offset), |
| - initial_request_length_(create_info.length), |
| content_length_(create_info.total_bytes), |
| requests_sent_(false), |
| is_canceled_(false) {} |
| @@ -102,6 +102,7 @@ void ParallelDownloadJob::OnByteStreamReady( |
| std::unique_ptr<ByteStreamReader> stream_reader) { |
| bool success = DownloadJob::AddByteStream(std::move(stream_reader), |
| worker->offset(), worker->length()); |
| + RecordParallelDownloadAddStreamSuccess(success); |
| // Destroy the request if the sink is gone. |
| if (!success) { |
| @@ -138,17 +139,11 @@ void ParallelDownloadJob::BuildParallelRequests() { |
| DCHECK(!slices_to_download.empty()); |
| int64_t first_slice_offset = slices_to_download[0].offset; |
| - |
| - if (initial_request_offset_ > first_slice_offset) { |
| - DVLOG(kVerboseLevel) << "Initial request is after the first slice to" |
| - " download."; |
| - } |
| + DCHECK(initial_request_offset_ <= first_slice_offset); |
| // Create more slices for a new download. The initial request may generate |
| - // a received slice. If there are holes before |initial_request_offset_|, |
| - // don't create more slices. |
| + // a received slice. |
| if (slices_to_download.size() <= 1 && |
| - initial_request_length_ == DownloadSaveInfo::kLengthFullContent && |
| initial_request_offset_ <= first_slice_offset) { |
| // TODO(qinmin): Check the size of the last slice. If it is huge, we can |
| // split it into N pieces and pass the last N-1 pieces to different workers. |
| @@ -164,26 +159,21 @@ void ParallelDownloadJob::BuildParallelRequests() { |
| DownloadSaveInfo::kLengthFullContent); |
| ForkSubRequests(slices_to_download); |
| + RecordParallelDownloadRequestCount( |
| + static_cast<int>(slices_to_download.size())); |
| requests_sent_ = true; |
| } |
| void ParallelDownloadJob::ForkSubRequests( |
| const DownloadItem::ReceivedSlices& slices_to_download) { |
| - bool initial_request_skipped = false; |
| - for (auto it = slices_to_download.begin(); it != slices_to_download.end(); |
| + if (slices_to_download.size() < 2) |
| + return; |
| + |
| + // Assume the first slice to download will be handled by the initial request. |
| + for (auto it = slices_to_download.begin() + 1; it != slices_to_download.end(); |
|
qinmin
2017/04/03 18:36:19
nit: const auto& it
xingliu
2017/04/03 20:38:05
This will yield a compiling error, as does auto& it.
qinmin
2017/04/03 20:46:49
Really? "for (const auto& slice : slices_to_download)"
|
| ++it) { |
| - // Create requests for holes before the |initial_request_offset_|. |
| - if (it->offset < initial_request_offset_) { |
| - CreateRequest(it->offset, it->received_bytes); |
| - continue; |
| - } |
| - |
| - // Assume the first slice to download after |initial_request_offset_| will |
| - // be handled by the initial request. |
| - if (initial_request_skipped) |
| - CreateRequest(it->offset, it->received_bytes); |
| - else |
| - initial_request_skipped = true; |
| + DCHECK(it->offset >= initial_request_offset_); |
|
David Trainor- moved to gerrit
2017/04/03 17:59:57
DCHECK_GE?
xingliu
2017/04/03 20:38:05
Done.
Also replaced a DCHECK with DCHECK_LE in an
|
| + CreateRequest(it->offset, it->received_bytes); |
| } |
| } |