Chromium Code Reviews| Index: content/browser/download/parallel_download_job.cc |
| diff --git a/content/browser/download/parallel_download_job.cc b/content/browser/download/parallel_download_job.cc |
| index 41ba75beceffe372b04a1a4a06f4c0cc739e4bd8..bc06750fa0847b383f20d10a47c006aefa1aa4a9 100644 |
| --- a/content/browser/download/parallel_download_job.cc |
| +++ b/content/browser/download/parallel_download_job.cc |
| @@ -5,6 +5,8 @@ |
| #include "content/browser/download/parallel_download_job.h" |
| #include "base/memory/ptr_util.h" |
| +#include "content/browser/download/download_create_info.h" |
| +#include "content/browser/download/parallel_download_utils.h" |
| #include "content/public/browser/browser_context.h" |
| #include "content/public/browser/storage_partition.h" |
| @@ -21,12 +23,33 @@ const int kParallelRequestCount = 2; |
| ParallelDownloadJob::ParallelDownloadJob( |
| DownloadItemImpl* download_item, |
| - std::unique_ptr<DownloadRequestHandleInterface> request_handle) |
| + std::unique_ptr<DownloadRequestHandleInterface> request_handle, |
| + const DownloadCreateInfo& create_info) |
| : DownloadJobImpl(download_item, std::move(request_handle)), |
| - request_num_(kParallelRequestCount) {} |
| + request_num_(kParallelRequestCount), |
| + initial_request_offset_(create_info.save_info->offset), |
| + initial_request_length_(create_info.save_info->length) {} |
| ParallelDownloadJob::~ParallelDownloadJob() = default; |
| +void ParallelDownloadJob::Start() { |
| + DownloadJobImpl::Start(); |
| + |
| + std::vector<DownloadItem::ReceivedSlice> slices_to_download = |
|
xingliu
2017/03/03 18:44:15
There is already a function to build requests for
qinmin
2017/03/03 19:32:35
Start() is called on both resumption and new downloads.
qinmin
2017/03/03 19:44:33
To clarify, Start() is called when resuming an interrupted download.
xingliu
2017/03/03 21:37:44
sgtm.
|
| + FindSlicesToDownload(download_item_->GetReceivedSlices()); |
| + // The initial request should cover the first slice. |
| + DCHECK_GE(slices_to_download[0].offset, initial_request_offset_); |
| + DCHECK(initial_request_length_ == DownloadSaveInfo::kLengthFullContent || |
| + initial_request_offset_ + initial_request_length_ >= |
| + slices_to_download[0].offset + |
| + slices_to_download[0].received_bytes); |
| + if (slices_to_download.size() >= kParallelRequestCount) { |
|
xingliu
2017/03/03 18:44:15
Maybe we can keep the mechanism simple for the first version.
qinmin
2017/03/03 19:32:35
For resumption, there could be fewer than N slices.
xingliu
2017/03/03 21:37:44
In general sgtm if the slices are written to db af
qinmin
2017/03/03 22:09:43
what do you mean by "write slice info"?
You mean
|
| + // Get the next |kParallelRequestCount - 1| slices and fork new requests. |
| + } else { |
| + // Split the last slice and assign it to different workers. |
| + } |
| +} |
| + |
| void ParallelDownloadJob::Cancel(bool user_cancel) { |
| DownloadJobImpl::Cancel(user_cancel); |
| for (auto& worker : workers_) |