Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2017 The Chromium Authors. All rights reserved. | 1 // Copyright 2017 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/browser/download/parallel_download_job.h" | 5 #include "content/browser/download/parallel_download_job.h" |
| 6 | 6 |
| 7 #include "base/memory/ptr_util.h" | 7 #include "base/memory/ptr_util.h" |
| 8 #include "content/browser/download/download_create_info.h" | |
| 9 #include "content/browser/download/parallel_download_utils.h" | |
| 8 #include "content/public/browser/browser_context.h" | 10 #include "content/public/browser/browser_context.h" |
| 9 #include "content/public/browser/storage_partition.h" | 11 #include "content/public/browser/storage_partition.h" |
| 10 | 12 |
| 11 namespace content { | 13 namespace content { |
| 12 | 14 |
| 13 namespace { | 15 namespace { |
| 14 | 16 |
| 15 // TODO(xingliu): Use finch parameters to configure constants. | 17 // TODO(xingliu): Use finch parameters to configure constants. |
| 16 // Default number of requests in a parallel download, including the original | 18 // Default number of requests in a parallel download, including the original |
| 17 // request. | 19 // request. |
| 18 const int kParallelRequestCount = 2; | 20 const int kParallelRequestCount = 2; |
| 19 | 21 |
| 20 } // namespace | 22 } // namespace |
| 21 | 23 |
| 22 ParallelDownloadJob::ParallelDownloadJob( | 24 ParallelDownloadJob::ParallelDownloadJob( |
| 23 DownloadItemImpl* download_item, | 25 DownloadItemImpl* download_item, |
| 24 std::unique_ptr<DownloadRequestHandleInterface> request_handle) | 26 std::unique_ptr<DownloadRequestHandleInterface> request_handle, |
| 27 const DownloadCreateInfo& create_info) | |
| 25 : DownloadJobImpl(download_item, std::move(request_handle)), | 28 : DownloadJobImpl(download_item, std::move(request_handle)), |
| 26 request_num_(kParallelRequestCount) {} | 29 request_num_(kParallelRequestCount), |
| 30 initial_request_offset_(create_info.save_info->offset), | |
| 31 initial_request_length_(create_info.save_info->length) {} | |
| 27 | 32 |
| 28 ParallelDownloadJob::~ParallelDownloadJob() = default; | 33 ParallelDownloadJob::~ParallelDownloadJob() = default; |
| 29 | 34 |
| 35 void ParallelDownloadJob::Start() { | |
| 36 DownloadJobImpl::Start(); | |
| 37 | |
| 38 BuildParallelRequests(); | |
| 39 } | |
| 40 | |
| 30 void ParallelDownloadJob::Cancel(bool user_cancel) { | 41 void ParallelDownloadJob::Cancel(bool user_cancel) { |
| 31 DownloadJobImpl::Cancel(user_cancel); | 42 DownloadJobImpl::Cancel(user_cancel); |
| 32 for (auto& worker : workers_) | 43 for (auto& worker : workers_) |
| 33 worker->Cancel(); | 44 worker->Cancel(); |
| 34 } | 45 } |
| 35 | 46 |
| 36 void ParallelDownloadJob::Pause() { | 47 void ParallelDownloadJob::Pause() { |
| 37 DownloadJobImpl::Pause(); | 48 DownloadJobImpl::Pause(); |
| 38 for (auto& worker : workers_) | 49 for (auto& worker : workers_) |
| 39 worker->Pause(); | 50 worker->Pause(); |
| (...skipping 24 matching lines...) Expand all Loading... | |
| 64 // TODO(xingliu): Add records for slices in history db. | 75 // TODO(xingliu): Add records for slices in history db. |
| 65 for (int i = 0; i < num_requests - 1; ++i) { | 76 for (int i = 0; i < num_requests - 1; ++i) { |
| 66 int64_t length = (i == (num_requests - 2)) | 77 int64_t length = (i == (num_requests - 2)) |
| 67 ? slice_size + (bytes_left % slice_size) | 78 ? slice_size + (bytes_left % slice_size) |
| 68 : slice_size; | 79 : slice_size; |
| 69 CreateRequest(current_offset, length); | 80 CreateRequest(current_offset, length); |
| 70 current_offset += slice_size; | 81 current_offset += slice_size; |
| 71 } | 82 } |
| 72 } | 83 } |
| 73 | 84 |
| 85 void ParallelDownloadJob::BuildParallelRequests() { | |
| 86 // Calculate the slices to download and fork parallel requests. | |
| 87 std::vector<DownloadItem::ReceivedSlice> slices_to_download = | |
| 88 FindSlicesToDownload(download_item_->GetReceivedSlices()); | |
| 89 // The initial request has already been sent; it should cover the first slice. |
| 90 DCHECK_GE(slices_to_download[0].offset, initial_request_offset_); | |
| 91 DCHECK(initial_request_length_ == DownloadSaveInfo::kLengthFullContent || | |
| 92 initial_request_offset_ + initial_request_length_ >= | |
| 93 slices_to_download[0].offset + | |
| 94 slices_to_download[0].received_bytes); | |
| 95 if (slices_to_download.size() >= kParallelRequestCount) { | |
| 96 // The size of |slices_to_download| should be no larger than | |
| 97 // |kParallelRequestCount| unless |kParallelRequestCount| is changed after | |
| 98 // a download is interrupted. This could happen if we use finch to configure |
| 99 // the number of parallel requests. | |
| 100 // TODO(qinmin): Get the next |kParallelRequestCount - 1| slices and fork | |
| 101 // new requests. For the remaining slices, they will be handled once some | |
| 102 // of the workers finish their job. | |
| 103 } else { | |
| 104 // TODO(qinmin): Check the size of the last slice. If it is huge, we can | |
| 104 // split it into N pieces and pass the last N-1 pieces to different workers. |
|
xingliu
2017/03/03 21:37:44
nit, N-1 pieces.
qinmin
2017/03/03 22:09:43
Done.
| |
| 106 // Otherwise, just fork |slices_to_download.size()| number of workers. | |
| 107 } | |
| 108 } | |
| 109 | |
| 74 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { | 110 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { |
| 75 std::unique_ptr<DownloadWorker> worker = base::MakeUnique<DownloadWorker>(); | 111 std::unique_ptr<DownloadWorker> worker = base::MakeUnique<DownloadWorker>(); |
| 76 | 112 |
| 77 DCHECK(download_item_); | 113 DCHECK(download_item_); |
| 78 StoragePartition* storage_partition = | 114 StoragePartition* storage_partition = |
| 79 BrowserContext::GetStoragePartitionForSite( | 115 BrowserContext::GetStoragePartitionForSite( |
| 80 download_item_->GetBrowserContext(), download_item_->GetSiteUrl()); | 116 download_item_->GetBrowserContext(), download_item_->GetSiteUrl()); |
| 81 | 117 |
| 82 std::unique_ptr<DownloadUrlParameters> download_params( | 118 std::unique_ptr<DownloadUrlParameters> download_params( |
| 83 new DownloadUrlParameters(download_item_->GetURL(), | 119 new DownloadUrlParameters(download_item_->GetURL(), |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 94 // Subsequent range requests have the same referrer URL as the original | 130 // Subsequent range requests have the same referrer URL as the original |
| 95 // download request. | 131 // download request. |
| 96 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), | 132 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), |
| 97 blink::WebReferrerPolicyAlways)); | 133 blink::WebReferrerPolicyAlways)); |
| 98 // Send the request. | 134 // Send the request. |
| 99 worker->SendRequest(std::move(download_params)); | 135 worker->SendRequest(std::move(download_params)); |
| 100 workers_.push_back(std::move(worker)); | 136 workers_.push_back(std::move(worker)); |
| 101 } | 137 } |
| 102 | 138 |
| 103 } // namespace content | 139 } // namespace content |
| OLD | NEW |