| OLD | NEW |
| 1 // Copyright 2017 The Chromium Authors. All rights reserved. | 1 // Copyright 2017 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/browser/download/parallel_download_job.h" | 5 #include "content/browser/download/parallel_download_job.h" |
| 6 | 6 |
| 7 #include "base/memory/ptr_util.h" | 7 #include "base/memory/ptr_util.h" |
| 8 #include "content/browser/download/download_create_info.h" | 8 #include "content/browser/download/download_create_info.h" |
| 9 #include "content/browser/download/parallel_download_utils.h" | 9 #include "content/browser/download/parallel_download_utils.h" |
| 10 #include "content/public/browser/browser_context.h" | 10 #include "content/public/browser/browser_context.h" |
| 11 #include "content/public/browser/storage_partition.h" | 11 #include "content/public/browser/storage_partition.h" |
| 12 | 12 |
| 13 namespace content { | 13 namespace content { |
| 14 | 14 |
| 15 ParallelDownloadJob::ParallelDownloadJob( | 15 ParallelDownloadJob::ParallelDownloadJob( |
| 16 DownloadItemImpl* download_item, | 16 DownloadItemImpl* download_item, |
| 17 std::unique_ptr<DownloadRequestHandleInterface> request_handle, | 17 std::unique_ptr<DownloadRequestHandleInterface> request_handle, |
| 18 const DownloadCreateInfo& create_info) | 18 const DownloadCreateInfo& create_info) |
| 19 : DownloadJobImpl(download_item, std::move(request_handle)), | 19 : DownloadJobImpl(download_item, std::move(request_handle)), |
| 20 initial_request_offset_(create_info.save_info->offset), | 20 initial_request_offset_(create_info.offset), |
| 21 initial_request_length_(create_info.save_info->length), | 21 content_length_(create_info.total_bytes), |
| 22 requests_sent_(false) {} | 22 requests_sent_(false) {} |
| 23 | 23 |
| 24 ParallelDownloadJob::~ParallelDownloadJob() = default; | 24 ParallelDownloadJob::~ParallelDownloadJob() = default; |
| 25 | 25 |
| 26 void ParallelDownloadJob::Start() { | 26 void ParallelDownloadJob::Start() { |
| 27 DownloadJobImpl::Start(); | 27 DownloadJobImpl::Start(); |
| 28 | 28 |
| 29 BuildParallelRequestAfterDelay(); | 29 BuildParallelRequestAfterDelay(); |
| 30 } | 30 } |
| 31 | 31 |
| 32 void ParallelDownloadJob::Cancel(bool user_cancel) { | 32 void ParallelDownloadJob::Cancel(bool user_cancel) { |
| 33 DownloadJobImpl::Cancel(user_cancel); | 33 DownloadJobImpl::Cancel(user_cancel); |
| 34 | 34 |
| 35 if (!requests_sent_) { | 35 if (!requests_sent_) { |
| 36 timer_.Stop(); | 36 timer_.Stop(); |
| 37 return; | 37 return; |
| 38 } | 38 } |
| 39 | 39 |
| 40 for (auto& worker : workers_) | 40 for (auto& worker : workers_) |
| 41 worker->Cancel(); | 41 worker.second->Cancel(); |
| 42 } | 42 } |
| 43 | 43 |
| 44 void ParallelDownloadJob::Pause() { | 44 void ParallelDownloadJob::Pause() { |
| 45 DownloadJobImpl::Pause(); | 45 DownloadJobImpl::Pause(); |
| 46 | 46 |
| 47 if (!requests_sent_) { | 47 if (!requests_sent_) { |
| 48 timer_.Stop(); | 48 timer_.Stop(); |
| 49 return; | 49 return; |
| 50 } | 50 } |
| 51 | 51 |
| 52 for (auto& worker : workers_) | 52 for (auto& worker : workers_) |
| 53 worker->Pause(); | 53 worker.second->Pause(); |
| 54 } | 54 } |
| 55 | 55 |
| 56 void ParallelDownloadJob::Resume(bool resume_request) { | 56 void ParallelDownloadJob::Resume(bool resume_request) { |
| 57 DownloadJobImpl::Resume(resume_request); | 57 DownloadJobImpl::Resume(resume_request); |
| 58 if (!resume_request) | 58 if (!resume_request) |
| 59 return; | 59 return; |
| 60 | 60 |
| 61 // Send parallel requests if the download was paused previously. | 61 // Send parallel requests if the download was paused previously. |
| 62 if (!requests_sent_) { | 62 if (!requests_sent_) { |
| 63 if (!timer_.IsRunning()) | 63 if (!timer_.IsRunning()) |
| 64 BuildParallelRequestAfterDelay(); | 64 BuildParallelRequestAfterDelay(); |
| 65 return; | 65 return; |
| 66 } | 66 } |
| 67 | 67 |
| 68 for (auto& worker : workers_) | 68 for (auto& worker : workers_) |
| 69 worker->Resume(); | 69 worker.second->Resume(); |
| 70 } | 70 } |
| 71 | 71 |
| 72 void ParallelDownloadJob::ForkRequestsForNewDownload(int64_t bytes_received, | 72 int ParallelDownloadJob::GetParallelRequestCount() const { |
| 73 int64_t total_bytes, | 73 return GetParallelRequestCountConfig(); |
| 74 int request_count) { | |
| 75 if (!download_item_ || total_bytes <= 0 || bytes_received >= total_bytes || | |
| 76 request_count <= 1) { | |
| 77 return; | |
| 78 } | |
| 79 | |
| 80 int64_t bytes_left = total_bytes - bytes_received; | |
| 81 int64_t slice_size = bytes_left / request_count; | |
| 82 slice_size = slice_size > 0 ? slice_size : 1; | |
| 83 int num_requests = bytes_left / slice_size; | |
| 84 int64_t current_offset = bytes_received + slice_size; | |
| 85 | |
| 86 // TODO(xingliu): Add records for slices in history db. | |
| 87 for (int i = 0; i < num_requests - 1; ++i) { | |
| 88 int64_t length = (i == (num_requests - 2)) | |
| 89 ? slice_size + (bytes_left % slice_size) | |
| 90 : slice_size; | |
| 91 CreateRequest(current_offset, length); | |
| 92 current_offset += slice_size; | |
| 93 } | |
| 94 } | 74 } |
| 95 | 75 |
| 96 void ParallelDownloadJob::BuildParallelRequestAfterDelay() { | 76 void ParallelDownloadJob::BuildParallelRequestAfterDelay() { |
| 97 DCHECK(workers_.empty()); | 77 DCHECK(workers_.empty()); |
| 98 DCHECK(!requests_sent_); | 78 DCHECK(!requests_sent_); |
| 99 DCHECK(!timer_.IsRunning()); | 79 DCHECK(!timer_.IsRunning()); |
| 100 | 80 |
| 101 timer_.Start(FROM_HERE, GetParallelRequestDelayConfig(), this, | 81 timer_.Start(FROM_HERE, GetParallelRequestDelayConfig(), this, |
| 102 &ParallelDownloadJob::BuildParallelRequests); | 82 &ParallelDownloadJob::BuildParallelRequests); |
| 103 } | 83 } |
| 104 | 84 |
| 85 void ParallelDownloadJob::OnByteStreamReady( |
| 86 DownloadWorker* worker, |
| 87 std::unique_ptr<ByteStreamReader> stream_reader) { |
| 88 DownloadJob::AddByteStream(std::move(stream_reader), worker->offset(), |
| 89 worker->length()); |
| 90 } |
| 91 |
| 105 void ParallelDownloadJob::BuildParallelRequests() { | 92 void ParallelDownloadJob::BuildParallelRequests() { |
| 106 DCHECK(!requests_sent_); | 93 DCHECK(!requests_sent_); |
| 107 | 94 // TODO(qinmin): The size of |slices_to_download| should be no larger than |
| 108 // Calculate the slices to download and fork parallel requests. | 95 // |kParallelRequestCount| unless |kParallelRequestCount| is changed after |
| 109 std::vector<DownloadItem::ReceivedSlice> slices_to_download = | 96 // a download is interrupted. This could happen if we use finch to config |
| 110 FindSlicesToDownload(download_item_->GetReceivedSlices()); | 97 // the number of parallel requests. |
| 111 // The initial request has already been sent, it should cover the first slice. | 98 // Get the next |kParallelRequestCount - 1| slices and fork |
| 112 DCHECK_GE(slices_to_download[0].offset, initial_request_offset_); | 99 // new requests. For the remaining slices, they will be handled once some |
| 113 DCHECK(initial_request_length_ == DownloadSaveInfo::kLengthFullContent || | 100 // of the workers finish their job. |
| 114 initial_request_offset_ + initial_request_length_ >= | 101 DownloadItem::ReceivedSlices slices_to_download; |
| 115 slices_to_download[0].offset + | 102 if (download_item_->GetReceivedSlices().empty()) { |
| 116 slices_to_download[0].received_bytes); | 103 slices_to_download = FindSlicesForRemainingContent( |
| 117 if (slices_to_download.size() >= | 104 initial_request_offset_, content_length_, GetParallelRequestCount()); |
| 118 static_cast<size_t>(GetParallelRequestCountConfig())) { | |
| 119 // The size of |slices_to_download| should be no larger than | |
| 120 // |kParallelRequestCount| unless |kParallelRequestCount| is changed after | |
| 121 // a download is interrupted. This could happen if we use finch to config | |
| 122 // the number of parallel requests. | |
| 123 // TODO(qinmin): Get the next |kParallelRequestCount - 1| slices and fork | |
| 124 // new requests. For the remaining slices, they will be handled once some | |
| 125 // of the workers finish their job. | |
| 126 } else { | 105 } else { |
| 127 // TODO(qinmin): Check the size of the last slice. If it is huge, we can | 106 // TODO(qinmin): Check the size of the last slice. If it is huge, we can |
| 128 // split it into N pieces and pass the last N-1 pirces to different workers. | 107 // split it into N pieces and pass the last N-1 pieces to different workers. |
| 129 // Otherwise, just fork |slices_to_download.size()| number of workers. | 108 // Otherwise, just fork |slices_to_download.size()| number of workers. |
| 109 slices_to_download = |
| 110 FindSlicesToDownload(download_item_->GetReceivedSlices()); |
| 130 } | 111 } |
| 131 | 112 |
| 113 if (slices_to_download.empty()) |
| 114 return; |
| 115 |
| 116 DCHECK_EQ(slices_to_download[0].offset, initial_request_offset_); |
| 117 DCHECK_EQ(slices_to_download.back().received_bytes, |
| 118 DownloadSaveInfo::kLengthFullContent); |
| 119 |
| 120 // Send requests, not including the original request. |
| 121 ForkSubRequests(slices_to_download); |
| 122 |
| 132 requests_sent_ = true; | 123 requests_sent_ = true; |
| 133 } | 124 } |
| 134 | 125 |
| 126 void ParallelDownloadJob::ForkSubRequests( |
| 127 const DownloadItem::ReceivedSlices& slices_to_download) { |
| 128 if (slices_to_download.size() < 2) |
| 129 return; |
| 130 |
| 131 for (auto it = slices_to_download.begin() + 1; it != slices_to_download.end(); |
| 132 ++it) { |
| 133 // received_bytes here is the number of bytes to download. |
| 134 CreateRequest(it->offset, it->received_bytes); |
| 135 } |
| 136 } |
| 137 |
| 135 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { | 138 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { |
| 136 std::unique_ptr<DownloadWorker> worker = base::MakeUnique<DownloadWorker>(); | 139 DCHECK(download_item_); |
| 137 | 140 |
| 138 DCHECK(download_item_); | 141 std::unique_ptr<DownloadWorker> worker = |
| 142 base::MakeUnique<DownloadWorker>(this, offset, length); |
| 143 |
| 139 StoragePartition* storage_partition = | 144 StoragePartition* storage_partition = |
| 140 BrowserContext::GetStoragePartitionForSite( | 145 BrowserContext::GetStoragePartitionForSite( |
| 141 download_item_->GetBrowserContext(), download_item_->GetSiteUrl()); | 146 download_item_->GetBrowserContext(), download_item_->GetSiteUrl()); |
| 142 | 147 |
| 143 std::unique_ptr<DownloadUrlParameters> download_params( | 148 std::unique_ptr<DownloadUrlParameters> download_params( |
| 144 new DownloadUrlParameters(download_item_->GetURL(), | 149 new DownloadUrlParameters(download_item_->GetURL(), |
| 145 storage_partition->GetURLRequestContext())); | 150 storage_partition->GetURLRequestContext())); |
| 146 download_params->set_file_path(download_item_->GetFullPath()); | 151 download_params->set_file_path(download_item_->GetFullPath()); |
| 147 download_params->set_last_modified(download_item_->GetLastModifiedTime()); | 152 download_params->set_last_modified(download_item_->GetLastModifiedTime()); |
| 148 download_params->set_etag(download_item_->GetETag()); | 153 download_params->set_etag(download_item_->GetETag()); |
| 149 download_params->set_offset(offset); | 154 download_params->set_offset(offset); |
| 150 | 155 |
| 151 // Setting the length will result in range request to fetch a slice of the | 156 // Setting the length will result in range request to fetch a slice of the |
| 152 // file. | 157 // file. |
| 153 download_params->set_length(length); | 158 download_params->set_length(length); |
| 154 | 159 |
| 155 // Subsequent range requests have the same referrer URL as the original | 160 // Subsequent range requests have the same referrer URL as the original |
| 156 // download request. | 161 // download request. |
| 157 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), | 162 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), |
| 158 blink::WebReferrerPolicyAlways)); | 163 blink::WebReferrerPolicyAlways)); |
| 159 // Send the request. | 164 // Send the request. |
| 160 worker->SendRequest(std::move(download_params)); | 165 worker->SendRequest(std::move(download_params)); |
| 161 workers_.push_back(std::move(worker)); | 166 DCHECK(workers_.find(offset) == workers_.end()); |
| 167 workers_[offset] = std::move(worker); |
| 162 } | 168 } |
| 163 | 169 |
| 164 } // namespace content | 170 } // namespace content |
| OLD | NEW |