Chromium Code Reviews
Index: content/browser/download/parallel_download_job.cc
diff --git a/content/browser/download/parallel_download_job.cc b/content/browser/download/parallel_download_job.cc
index c8bba093fa4d9ed30d7e44329826058fa4f3727c..87905d941256ba794cd9be1d4089c52640f24625 100644
--- a/content/browser/download/parallel_download_job.cc
+++ b/content/browser/download/parallel_download_job.cc
@@ -17,8 +17,8 @@ ParallelDownloadJob::ParallelDownloadJob(
     std::unique_ptr<DownloadRequestHandleInterface> request_handle,
     const DownloadCreateInfo& create_info)
     : DownloadJobImpl(download_item, std::move(request_handle)),
-      initial_request_offset_(create_info.save_info->offset),
-      initial_request_length_(create_info.save_info->length) {}
+      initial_request_offset_(create_info.offset),
+      content_length_(create_info.total_bytes) {}

 ParallelDownloadJob::~ParallelDownloadJob() = default;
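For context: parallel_download_job.h is not part of this snippet, so the member declarations below are a plausible sketch inferred from the constructor initializers above, not something this CL shows.

  // Sketch only -- inferred from the .cc changes, not shown in this diff.
  // Offset of the initial request sent for the download.
  int64_t initial_request_offset_ = 0;
  // Total content length from DownloadCreateInfo::total_bytes, replacing the
  // old |initial_request_length_| that came from DownloadSaveInfo.
  int64_t content_length_ = 0;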
@@ -31,13 +31,13 @@ void ParallelDownloadJob::Start() {
 void ParallelDownloadJob::Cancel(bool user_cancel) {
   DownloadJobImpl::Cancel(user_cancel);
   for (auto& worker : workers_)
-    worker->Cancel();
+    worker.second->Cancel();
 }

 void ParallelDownloadJob::Pause() {
   DownloadJobImpl::Pause();
   for (auto& worker : workers_)
-    worker->Pause();
+    worker.second->Pause();
 }
 void ParallelDownloadJob::Resume(bool resume_request) {
@@ -46,63 +46,64 @@ void ParallelDownloadJob::Resume(bool resume_request) {
     return;
   for (auto& worker : workers_)
-    worker->Resume();
+    worker.second->Resume();
 }
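The switch from worker-> to worker.second-> in Cancel(), Pause(), and Resume() implies |workers_| changed from a vector to an associative container keyed by slice offset; the workers_[offset] = std::move(worker) line at the end of this CL supports that. A minimal sketch of the assumed declaration (the header is not in this diff):

  std::map<int64_t, std::unique_ptr<DownloadWorker>> workers_;

  // Iteration then yields std::pair<const int64_t, std::unique_ptr<...>>&,
  // so the loops dereference the mapped value:
  for (auto& worker : workers_)
    worker.second->Resume();  // |worker.first| is the slice's starting offset.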
-void ParallelDownloadJob::ForkRequestsForNewDownload(int64_t bytes_received,
-                                                     int64_t total_bytes,
-                                                     int request_count) {
-  if (!download_item_ || total_bytes <= 0 || bytes_received >= total_bytes ||
-      request_count <= 1) {
-    return;
-  }
+int ParallelDownloadJob::GetParallelRequestCount() const {
+  return GetParallelRequestCountConfig();
+}
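GetParallelRequestCount() looks like a thin wrapper around the Finch-configured value, presumably declared virtual so tests can stub the request count. That is an assumption, since the header is not shown; a hypothetical test override would be:

  // Hypothetical test double -- assumes the method is virtual in the header.
  class TestParallelDownloadJob : public ParallelDownloadJob {
   protected:
    int GetParallelRequestCount() const override { return 3; }
    // Constructors and other overrides elided.
  };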
-  int64_t bytes_left = total_bytes - bytes_received;
-  int64_t slice_size = bytes_left / request_count;
-  slice_size = slice_size > 0 ? slice_size : 1;
-  int num_requests = bytes_left / slice_size;
-  int64_t current_offset = bytes_received + slice_size;
-
-  // TODO(xingliu): Add records for slices in history db.
-  for (int i = 0; i < num_requests - 1; ++i) {
-    int64_t length = (i == (num_requests - 2))
-                         ? slice_size + (bytes_left % slice_size)
-                         : slice_size;
-    CreateRequest(current_offset, length);
-    current_offset += slice_size;
-  }
+void ParallelDownloadJob::OnByteStreamReady(
+    DownloadWorker* worker,
+    std::unique_ptr<ByteStreamReader> stream_reader) {
+  DownloadJob::AddByteStream(std::move(stream_reader), worker->offset(),
+                             worker->length());
 }
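FindSlicesForNewDownload() is not defined in this file. Based on the DCHECKs later in this hunk (the first slice starts at |initial_request_offset_| and the last slice's length is kLengthFullContent), here is a hypothetical sketch of what it computes, standing in for the removed inline slicing above:

  // Hypothetical sketch -- the real helper lives elsewhere in the CL.
  // Carves [offset, offset + total_length) into |request_count| slices,
  // reusing ReceivedSlice::received_bytes as the slice length and leaving
  // the last slice open-ended (kLengthFullContent, "to end of content").
  DownloadItem::ReceivedSlices FindSlicesForNewDownload(int64_t offset,
                                                        int64_t total_length,
                                                        int request_count) {
    DownloadItem::ReceivedSlices slices;
    if (total_length <= 0 || request_count <= 0)
      return slices;
    int64_t slice_size = std::max<int64_t>(total_length / request_count, 1);
    for (int i = 0; i < request_count - 1; ++i, offset += slice_size)
      slices.emplace_back(offset, slice_size);
    slices.emplace_back(offset, DownloadSaveInfo::kLengthFullContent);
    return slices;
  }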
 void ParallelDownloadJob::BuildParallelRequests() {
-  // Calculate the slices to download and fork parallel requests.
-  std::vector<DownloadItem::ReceivedSlice> slices_to_download =
-      FindSlicesToDownload(download_item_->GetReceivedSlices());
-  // The initial request has already been sent, it should cover the first slice.
-  DCHECK_GE(slices_to_download[0].offset, initial_request_offset_);
-  DCHECK(initial_request_length_ == DownloadSaveInfo::kLengthFullContent ||
-         initial_request_offset_ + initial_request_length_ >=
-             slices_to_download[0].offset +
-                 slices_to_download[0].received_bytes);
-  if (slices_to_download.size() >=
-      static_cast<size_t>(GetParallelRequestCountConfig())) {
-    // The size of |slices_to_download| should be no larger than
-    // |kParallelRequestCount| unless |kParallelRequestCount| is changed after
-    // a download is interrupted. This could happen if we use finch to config
-    // the number of parallel requests.
-    // TODO(qinmin): Get the next |kParallelRequestCount - 1| slices and fork
-    // new requests. For the remaining slices, they will be handled once some
-    // of the workers finish their job.
+  // TODO(qinmin): The size of |slices_to_download| should be no larger than
+  // |kParallelRequestCount| unless |kParallelRequestCount| is changed after a
+  // download is interrupted. This could happen if we use Finch to configure
+  // the number of parallel requests. Get the next |kParallelRequestCount - 1|
+  // slices and fork new requests; the remaining slices will be handled once
+  // some of the workers finish their job.
+  DownloadItem::ReceivedSlices slices_to_download;
+  if (download_item_->GetReceivedSlices().empty()) {
+    slices_to_download = FindSlicesForNewDownload(
+        initial_request_offset_, content_length_, GetParallelRequestCount());
|
qinmin, 2017/03/11 06:08:22:
  nit: shouldn't the initial_request_offset_ always …
xingliu, 2017/03/13 17:53:17:
  Thanks for this suggestion. Yeah, this function c…
   } else {
     // TODO(qinmin): Check the size of the last slice. If it is huge, we can
-    // split it into N pieces and pass the last N-1 pirces to different workers.
+    // split it into N pieces and pass the last N-1 pieces to different workers.
     // Otherwise, just fork |slices_to_download.size()| number of workers.
+    slices_to_download =
+        FindSlicesToDownload(download_item_->GetReceivedSlices());
   }
+
+  if (slices_to_download.empty())
+    return;
+
+  DCHECK_EQ(slices_to_download[0].offset, initial_request_offset_);
+  DCHECK_EQ(slices_to_download.back().received_bytes,
+            DownloadSaveInfo::kLengthFullContent);
+
+  // Fork the sub-requests; this does not include the original request.
+  ForkSubRequests(slices_to_download);
 }
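A concrete walk-through with made-up numbers: for a fresh download with initial_request_offset_ == 0, content_length_ == 99, and a request count of 3, the slicing sketched earlier would produce {0, 33}, {33, 33}, {66, kLengthFullContent}. The original request already streams the first slice, so ForkSubRequests() below only creates workers at offsets 33 (length 33) and 66 (open-ended, to the end of the content).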
-void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) {
-  std::unique_ptr<DownloadWorker> worker = base::MakeUnique<DownloadWorker>();
+void ParallelDownloadJob::ForkSubRequests(
+    const DownloadItem::ReceivedSlices& slices_to_download) {
+  for (auto it = slices_to_download.begin() + 1; it != slices_to_download.end();
qinmin, 2017/03/11 06:08:22:
  const auto&
xingliu, 2017/03/13 17:53:17:
  Done. Use ++it here so const will yield compiling …
+       ++it)
+    CreateRequest(it->offset, it->received_bytes);
qinmin, 2017/03/11 06:08:22:
  you need {} here since the for loop spans mu…
xingliu, 2017/03/13 17:53:17:
  Done.
+}
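Per the exchange above, the loop presumably landed with braces added and ++it kept; this snapshot predates that fix, but the post-review form would read something like:

  for (auto it = slices_to_download.begin() + 1;
       it != slices_to_download.end(); ++it) {
    CreateRequest(it->offset, it->received_bytes);
  }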
+void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) {
   DCHECK(download_item_);
+
+  std::unique_ptr<DownloadWorker> worker =
+      base::MakeUnique<DownloadWorker>(this, offset, length);
+
   StoragePartition* storage_partition =
       BrowserContext::GetStoragePartitionForSite(
           download_item_->GetBrowserContext(), download_item_->GetSiteUrl());
@@ -125,7 +126,8 @@ void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) {
       blink::WebReferrerPolicyAlways));

   // Send the request.
   worker->SendRequest(std::move(download_params));
-  workers_.push_back(std::move(worker));
+  DCHECK(workers_.find(offset) == workers_.end());
+  workers_[offset] = std::move(worker);
 }
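DownloadWorker now receives its delegate, offset, and length at construction, and OnByteStreamReady() reads them back via offset()/length(). download_worker.h is not in this snippet, so the following is only a sketch of the interface this CL appears to assume:

  // Sketch of the assumed DownloadWorker surface; not shown in this diff.
  class DownloadWorker {
   public:
    class Delegate {
     public:
      virtual void OnByteStreamReady(
          DownloadWorker* worker,
          std::unique_ptr<ByteStreamReader> stream_reader) = 0;
    };

    DownloadWorker(Delegate* delegate, int64_t offset, int64_t length);

    int64_t offset() const { return offset_; }
    int64_t length() const { return length_; }
    // Plus SendRequest(), Pause(), Resume(), Cancel() used by the job.

   private:
    const int64_t offset_;
    const int64_t length_;
  };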
 }  // namespace content