Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2017 The Chromium Authors. All rights reserved. | 1 // Copyright 2017 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/browser/download/parallel_download_job.h" | 5 #include "content/browser/download/parallel_download_job.h" |
| 6 | 6 |
| 7 #include "base/memory/ptr_util.h" | 7 #include "base/memory/ptr_util.h" |
| 8 #include "content/browser/download/download_create_info.h" | 8 #include "content/browser/download/download_create_info.h" |
| 9 #include "content/browser/download/parallel_download_utils.h" | 9 #include "content/browser/download/parallel_download_utils.h" |
| 10 #include "content/public/browser/browser_context.h" | 10 #include "content/public/browser/browser_context.h" |
| 11 #include "content/public/browser/storage_partition.h" | 11 #include "content/public/browser/storage_partition.h" |
| 12 | 12 |
| 13 namespace content { | 13 namespace content { |
| 14 | 14 |
| 15 ParallelDownloadJob::ParallelDownloadJob( | 15 ParallelDownloadJob::ParallelDownloadJob( |
| 16 DownloadItemImpl* download_item, | 16 DownloadItemImpl* download_item, |
| 17 std::unique_ptr<DownloadRequestHandleInterface> request_handle, | 17 std::unique_ptr<DownloadRequestHandleInterface> request_handle, |
| 18 const DownloadCreateInfo& create_info) | 18 const DownloadCreateInfo& create_info) |
| 19 : DownloadJobImpl(download_item, std::move(request_handle)), | 19 : DownloadJobImpl(download_item, std::move(request_handle)), |
| 20 initial_request_offset_(create_info.save_info->offset), | 20 initial_request_offset_(create_info.offset), |
| 21 initial_request_length_(create_info.save_info->length) {} | 21 content_length_(create_info.total_bytes) {} |
| 22 | 22 |
| 23 ParallelDownloadJob::~ParallelDownloadJob() = default; | 23 ParallelDownloadJob::~ParallelDownloadJob() = default; |
| 24 | 24 |
| 25 void ParallelDownloadJob::Start() { | 25 void ParallelDownloadJob::Start() { |
| 26 DownloadJobImpl::Start(); | 26 DownloadJobImpl::Start(); |
| 27 | 27 |
| 28 BuildParallelRequests(); | 28 BuildParallelRequests(); |
| 29 } | 29 } |
| 30 | 30 |
| 31 void ParallelDownloadJob::Cancel(bool user_cancel) { | 31 void ParallelDownloadJob::Cancel(bool user_cancel) { |
| 32 DownloadJobImpl::Cancel(user_cancel); | 32 DownloadJobImpl::Cancel(user_cancel); |
| 33 for (auto& worker : workers_) | 33 for (auto& worker : workers_) |
| 34 worker->Cancel(); | 34 worker.second->Cancel(); |
| 35 } | 35 } |
| 36 | 36 |
| 37 void ParallelDownloadJob::Pause() { | 37 void ParallelDownloadJob::Pause() { |
| 38 DownloadJobImpl::Pause(); | 38 DownloadJobImpl::Pause(); |
| 39 for (auto& worker : workers_) | 39 for (auto& worker : workers_) |
| 40 worker->Pause(); | 40 worker.second->Pause(); |
| 41 } | 41 } |
| 42 | 42 |
| 43 void ParallelDownloadJob::Resume(bool resume_request) { | 43 void ParallelDownloadJob::Resume(bool resume_request) { |
| 44 DownloadJobImpl::Resume(resume_request); | 44 DownloadJobImpl::Resume(resume_request); |
| 45 if (!resume_request) | 45 if (!resume_request) |
| 46 return; | 46 return; |
| 47 | 47 |
| 48 for (auto& worker : workers_) | 48 for (auto& worker : workers_) |
| 49 worker->Resume(); | 49 worker.second->Resume(); |
| 50 } | 50 } |
| 51 | 51 |
| 52 void ParallelDownloadJob::ForkRequestsForNewDownload(int64_t bytes_received, | 52 int ParallelDownloadJob::GetParallelRequestCount() const { |
| 53 int64_t total_bytes, | 53 return GetParallelRequestCountConfig(); |
| 54 int request_count) { | 54 } |
| 55 if (!download_item_ || total_bytes <= 0 || bytes_received >= total_bytes || | |
| 56 request_count <= 1) { | |
| 57 return; | |
| 58 } | |
| 59 | 55 |
| 60 int64_t bytes_left = total_bytes - bytes_received; | 56 void ParallelDownloadJob::OnByteStreamReady( |
| 61 int64_t slice_size = bytes_left / request_count; | 57 DownloadWorker* worker, |
| 62 slice_size = slice_size > 0 ? slice_size : 1; | 58 std::unique_ptr<ByteStreamReader> stream_reader) { |
| 63 int num_requests = bytes_left / slice_size; | 59 DownloadJob::AddByteStream(std::move(stream_reader), worker->offset(), |
| 64 int64_t current_offset = bytes_received + slice_size; | 60 worker->length()); |
| 65 | |
| 66 // TODO(xingliu): Add records for slices in history db. | |
| 67 for (int i = 0; i < num_requests - 1; ++i) { | |
| 68 int64_t length = (i == (num_requests - 2)) | |
| 69 ? slice_size + (bytes_left % slice_size) | |
| 70 : slice_size; | |
| 71 CreateRequest(current_offset, length); | |
| 72 current_offset += slice_size; | |
| 73 } | |
| 74 } | 61 } |
| 75 | 62 |
| 76 void ParallelDownloadJob::BuildParallelRequests() { | 63 void ParallelDownloadJob::BuildParallelRequests() { |
| 77 // Calculate the slices to download and fork parallel requests. | 64 // TODO(qinmin): The size of |slices_to_download| should be no larger than |
| 78 std::vector<DownloadItem::ReceivedSlice> slices_to_download = | 65 // |kParallelRequestCount| unless |kParallelRequestCount| is changed after |
| 79 FindSlicesToDownload(download_item_->GetReceivedSlices()); | 66 // a download is interrupted. This could happen if we use finch to config |
| 80 // The initial request has already been sent, it should cover the first slice. | 67 // the number of parallel requests. |
| 81 DCHECK_GE(slices_to_download[0].offset, initial_request_offset_); | 68 // Get the next |kParallelRequestCount - 1| slices and fork |
| 82 DCHECK(initial_request_length_ == DownloadSaveInfo::kLengthFullContent || | 69 // new requests. For the remaining slices, they will be handled once some |
| 83 initial_request_offset_ + initial_request_length_ >= | 70 // of the workers finish their job. |
| 84 slices_to_download[0].offset + | 71 DownloadItem::ReceivedSlices slices_to_download; |
| 85 slices_to_download[0].received_bytes); | 72 if (download_item_->GetReceivedSlices().empty()) { |
| 86 if (slices_to_download.size() >= | 73 slices_to_download = FindSlicesForNewDownload( |
| 87 static_cast<size_t>(GetParallelRequestCountConfig())) { | 74 initial_request_offset_, content_length_, GetParallelRequestCount()); |

> qinmin (2017/03/11 06:08:22): nit: shouldn't the initial_request_offset_ always …
>
> xingliu (2017/03/13 17:53:17): Thanks for this suggestion. Yeah, this function c…

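For context on the discussion above, here is a minimal standalone sketch of how a new download might be split into parallel slices. It is not the CL's actual FindSlicesForNewDownload; the `Slice` struct and `SplitForNewDownload` name are illustrative. The arithmetic follows the removed ForkRequestsForNewDownload (slice_size = bytes_left / request_count), and the last slice is left open-ended (length 0) to match the new code's DCHECK on DownloadSaveInfo::kLengthFullContent.

```cpp
// Illustrative sketch only -- not Chromium's FindSlicesForNewDownload.
#include <algorithm>
#include <cstdint>
#include <vector>

struct Slice {
  int64_t offset = 0;
  int64_t length = 0;  // 0 == "until end of file" (kLengthFullContent).
};

std::vector<Slice> SplitForNewDownload(int64_t offset,
                                       int64_t total_bytes,
                                       int request_count) {
  std::vector<Slice> slices;
  if (total_bytes <= 0 || request_count < 1)
    return slices;

  const int64_t slice_size = std::max<int64_t>(total_bytes / request_count, 1);
  int64_t current = offset;
  // All slices except the last one get a fixed length.
  while (static_cast<int>(slices.size()) + 1 < request_count &&
         current + slice_size < offset + total_bytes) {
    slices.push_back({current, slice_size});
    current += slice_size;
  }
  // The final slice is open-ended, so it also absorbs any remainder.
  slices.push_back({current, 0});
  return slices;
}
```

For example, a 100-byte file split across four requests starting at offset 0 yields {0, 25}, {25, 25}, {50, 25}, {75, 0}, with the original request covering the first slice and workers taking the rest.
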
| 88 // The size of |slices_to_download| should be no larger than | |
| 89 // |kParallelRequestCount| unless |kParallelRequestCount| is changed after | |
| 90 // a download is interrupted. This could happen if we use finch to config | |
| 91 // the number of parallel requests. | |
| 92 // TODO(qinmin): Get the next |kParallelRequestCount - 1| slices and fork | |
| 93 // new requests. For the remaining slices, they will be handled once some | |
| 94 // of the workers finish their job. | |
| 95 } else { | 75 } else { |
| 96 // TODO(qinmin): Check the size of the last slice. If it is huge, we can | 76 // TODO(qinmin): Check the size of the last slice. If it is huge, we can |
| 97 // split it into N pieces and pass the last N-1 pirces to different workers. | 77 // split it into N pieces and pass the last N-1 pieces to different workers. |
| 98 // Otherwise, just fork |slices_to_download.size()| number of workers. | 78 // Otherwise, just fork |slices_to_download.size()| number of workers. |
| 79 slices_to_download = | |
| 80 FindSlicesToDownload(download_item_->GetReceivedSlices()); | |
| 99 } | 81 } |
| 82 | |
| 83 if (slices_to_download.empty()) | |
| 84 return; | |
| 85 | |
| 86 DCHECK_EQ(slices_to_download[0].offset, initial_request_offset_); | |
| 87 DCHECK_EQ(slices_to_download.back().received_bytes, | |
| 88 DownloadSaveInfo::kLengthFullContent); | |
| 89 | |
| 90 // Send requests, does not including the original request. | |
| 91 ForkSubRequests(slices_to_download); | |
| 92 } | |
| 93 | |
| 94 void ParallelDownloadJob::ForkSubRequests( | |
| 95 const DownloadItem::ReceivedSlices& slices_to_download) { | |
| 96 for (auto it = slices_to_download.begin() + 1; it != slices_to_download.end(); | |

> qinmin (2017/03/11 06:08:22): const auto&
>
> xingliu (2017/03/13 17:53:17): Done. use ++it here so const will yield compiling …

| 97 ++it) | |
| 98 CreateRequest(it->offset, it->received_bytes); | |

> qinmin (2017/03/11 06:08:22): you need {} here since the for loop spans multiple lines
>
> xingliu (2017/03/13 17:53:17): Done.

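The later patchset is not shown in this diff, so here is a hedged sketch of what ForkSubRequests presumably looks like once both comments above are addressed ("Done."): braces around the multi-line for statement, with the iterator form kept. The types and the `create_request` callback are simplified stand-ins for the Chromium classes.

```cpp
// Simplified stand-ins for the Chromium types; illustrative only.
#include <cstdint>
#include <functional>
#include <vector>

struct ReceivedSlice {
  int64_t offset = 0;
  int64_t received_bytes = 0;
};

void ForkSubRequests(
    const std::vector<ReceivedSlice>& slices_to_download,
    const std::function<void(int64_t offset, int64_t length)>& create_request) {
  if (slices_to_download.empty())
    return;
  // Skip the first slice: the original request already covers it.
  for (auto it = slices_to_download.begin() + 1;
       it != slices_to_download.end(); ++it) {
    create_request(it->offset, it->received_bytes);
  }
}
```
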
| 100 } | 99 } |
| 101 | 100 |
| 102 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { | 101 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { |
| 103 std::unique_ptr<DownloadWorker> worker = base::MakeUnique<DownloadWorker>(); | 102 DCHECK(download_item_); |
| 104 | 103 |
| 105 DCHECK(download_item_); | 104 std::unique_ptr<DownloadWorker> worker = |
| 105 base::MakeUnique<DownloadWorker>(this, offset, length); | |
| 106 | |
| 106 StoragePartition* storage_partition = | 107 StoragePartition* storage_partition = |
| 107 BrowserContext::GetStoragePartitionForSite( | 108 BrowserContext::GetStoragePartitionForSite( |
| 108 download_item_->GetBrowserContext(), download_item_->GetSiteUrl()); | 109 download_item_->GetBrowserContext(), download_item_->GetSiteUrl()); |
| 109 | 110 |
| 110 std::unique_ptr<DownloadUrlParameters> download_params( | 111 std::unique_ptr<DownloadUrlParameters> download_params( |
| 111 new DownloadUrlParameters(download_item_->GetURL(), | 112 new DownloadUrlParameters(download_item_->GetURL(), |
| 112 storage_partition->GetURLRequestContext())); | 113 storage_partition->GetURLRequestContext())); |
| 113 download_params->set_file_path(download_item_->GetFullPath()); | 114 download_params->set_file_path(download_item_->GetFullPath()); |
| 114 download_params->set_last_modified(download_item_->GetLastModifiedTime()); | 115 download_params->set_last_modified(download_item_->GetLastModifiedTime()); |
| 115 download_params->set_etag(download_item_->GetETag()); | 116 download_params->set_etag(download_item_->GetETag()); |
| 116 download_params->set_offset(offset); | 117 download_params->set_offset(offset); |
| 117 | 118 |
| 118 // Setting the length will result in range request to fetch a slice of the | 119 // Setting the length will result in range request to fetch a slice of the |
| 119 // file. | 120 // file. |
| 120 download_params->set_length(length); | 121 download_params->set_length(length); |
| 121 | 122 |
| 122 // Subsequent range requests have the same referrer URL as the original | 123 // Subsequent range requests have the same referrer URL as the original |
| 123 // download request. | 124 // download request. |
| 124 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), | 125 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), |
| 125 blink::WebReferrerPolicyAlways)); | 126 blink::WebReferrerPolicyAlways)); |
| 126 // Send the request. | 127 // Send the request. |
| 127 worker->SendRequest(std::move(download_params)); | 128 worker->SendRequest(std::move(download_params)); |
| 128 workers_.push_back(std::move(worker)); | 129 DCHECK(workers_.find(offset) == workers_.end()); |
| 130 workers_[offset] = std::move(worker); | |
| 129 } | 131 } |
| 130 | 132 |
| 131 } // namespace content | 133 } // namespace content |
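
The worker requests created in CreateRequest() rely on set_offset()/set_length() producing an HTTP range request, with length 0 (DownloadSaveInfo::kLengthFullContent) meaning "to the end of the file". A small illustrative helper (hypothetical, not part of the CL) shows how such an (offset, length) pair maps onto a Range header:

```cpp
// Hypothetical helper, not part of the CL: maps (offset, length) to the
// HTTP Range header a slice request would carry.
#include <cstdint>
#include <string>

std::string RangeHeaderFor(int64_t offset, int64_t length) {
  // "bytes=first-last" is inclusive on both ends; omitting the last byte
  // means "until the end of the resource".
  if (length <= 0)
    return "Range: bytes=" + std::to_string(offset) + "-";
  return "Range: bytes=" + std::to_string(offset) + "-" +
         std::to_string(offset + length - 1);
}

// RangeHeaderFor(1000, 500) -> "Range: bytes=1000-1499"
// RangeHeaderFor(2000, 0)   -> "Range: bytes=2000-"   (open-ended tail)
```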