Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2017 The Chromium Authors. All rights reserved. | 1 // Copyright 2017 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/browser/download/parallel_download_job.h" | 5 #include "content/browser/download/parallel_download_job.h" |
| 6 | 6 |
| 7 #include "base/memory/ptr_util.h" | 7 #include "base/memory/ptr_util.h" |
| 8 #include "content/browser/download/download_create_info.h" | 8 #include "content/browser/download/download_create_info.h" |
| 9 #include "content/browser/download/parallel_download_utils.h" | 9 #include "content/browser/download/parallel_download_utils.h" |
| 10 #include "content/public/browser/browser_context.h" | 10 #include "content/public/browser/browser_context.h" |
| (...skipping 31 matching lines...) | |
| 42 | 42 |
| 43 void ParallelDownloadJob::Resume(bool resume_request) { | 43 void ParallelDownloadJob::Resume(bool resume_request) { |
| 44 DownloadJobImpl::Resume(resume_request); | 44 DownloadJobImpl::Resume(resume_request); |
| 45 if (!resume_request) | 45 if (!resume_request) |
| 46 return; | 46 return; |
| 47 | 47 |
| 48 for (auto& worker : workers_) | 48 for (auto& worker : workers_) |
| 49 worker->Resume(); | 49 worker->Resume(); |
| 50 } | 50 } |
| 51 | 51 |
| | 52 void ParallelDownloadJob::OnServerResponseError( |
| | 53 DownloadWorker* worker, |
| | 54 DownloadInterruptReason reason) { |
| | 55 // TODO(xingliu): Consider to let the original request to cover the full |

David Trainor - moved to gerrit
2017/03/14 18:01:43
Would this ever depend on how much content we've …

xingliu
2017/03/14 21:33:21
Yes, we have the logic to limit the length of prec…

| | 56 // content if the sub-requests get invalid response. |
| | 57 DownloadJob::Interrupt(reason); |
| | 58 } |
| | 59 |
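The new OnServerResponseError path fails the whole job as soon as any sub-request reports an unusable server response, which is what the thread above is probing: whether the original request should instead keep going and cover the remaining content. Below is a minimal standalone sketch of the delegate relationship this callback implies; every name except OnServerResponseError is a hypothetical stand-in for the real DownloadWorker/ParallelDownloadJob wiring, and the non-206 check is only an illustrative trigger, not necessarily the exact condition Chromium uses.

```cpp
#include <cstdint>

enum class SketchInterruptReason { kNone, kServerBadContent };

class DownloadWorkerSketch;

// Stand-in for the job side: a worker reports errors here, and the job
// reacts by interrupting the whole parallel download (this sketch only
// records the reason).
class ParallelJobSketch {
 public:
  void OnServerResponseError(DownloadWorkerSketch* /*worker*/,
                             SketchInterruptReason reason) {
    interrupt_reason_ = reason;
  }
  SketchInterruptReason interrupt_reason_ = SketchInterruptReason::kNone;
};

// Stand-in for the worker side: it keeps a pointer back to its job plus the
// byte range it owns, which is presumably why the CL below passes
// |this, offset, length| to the DownloadWorker constructor.
class DownloadWorkerSketch {
 public:
  DownloadWorkerSketch(ParallelJobSketch* job, int64_t offset, int64_t length)
      : job_(job), offset_(offset), length_(length) {}

  void OnResponseStarted(int http_status) {
    // Illustrative trigger only: a range request that is not answered with
    // 206 Partial Content cannot be stitched into the target file.
    if (http_status != 206) {
      job_->OnServerResponseError(this,
                                  SketchInterruptReason::kServerBadContent);
    }
  }

 private:
  ParallelJobSketch* job_;
  int64_t offset_;  // First byte this worker is responsible for.
  int64_t length_;  // Number of bytes this worker should fetch.
};
```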
| 52 void ParallelDownloadJob::ForkRequestsForNewDownload(int64_t bytes_received, | 60 void ParallelDownloadJob::ForkRequestsForNewDownload(int64_t bytes_received, |
| 53 int64_t total_bytes, | 61 int64_t total_bytes, |
| 54 int request_count) { | 62 int request_count) { |
| 55 if (!download_item_ || total_bytes <= 0 || bytes_received >= total_bytes || | 63 if (!download_item_ || total_bytes <= 0 || bytes_received >= total_bytes || |
| 56 request_count <= 1) { | 64 request_count <= 1) { |
| 57 return; | 65 return; |
| 58 } | 66 } |
| 59 | 67 |
| 60 int64_t bytes_left = total_bytes - bytes_received; | 68 int64_t bytes_left = total_bytes - bytes_received; |
| 61 int64_t slice_size = bytes_left / request_count; | 69 int64_t slice_size = bytes_left / request_count; |
| 62 slice_size = slice_size > 0 ? slice_size : 1; | 70 slice_size = slice_size > 0 ? slice_size : 1; |
| 63 int num_requests = bytes_left / slice_size; | 71 int num_requests = bytes_left / slice_size; |
| 64 int64_t current_offset = bytes_received + slice_size; | 72 int64_t current_offset = bytes_received + slice_size; |
| 65 | 73 |
| 66 // TODO(xingliu): Add records for slices in history db. | |
| 67 for (int i = 0; i < num_requests - 1; ++i) { | 74 for (int i = 0; i < num_requests - 1; ++i) { |
| 68 int64_t length = (i == (num_requests - 2)) | 75 int64_t length = (i == (num_requests - 2)) |
| 69 ? slice_size + (bytes_left % slice_size) | 76 ? slice_size + (bytes_left % slice_size) |
| 70 : slice_size; | 77 : slice_size; |
| 71 CreateRequest(current_offset, length); | 78 CreateRequest(current_offset, length); |
| 72 current_offset += slice_size; | 79 current_offset += slice_size; |
| 73 } | 80 } |
| 74 } | 81 } |
| 75 | 82 |
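The slicing math in ForkRequestsForNewDownload is easier to follow with concrete numbers. The sketch below is a standalone re-derivation of that arithmetic (not Chromium code), assuming a fresh 100-byte download split across 4 requests: the original request keeps the first slice, and the loop forks the remaining three, with the last fork absorbing any remainder.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
  // Assumed inputs: nothing received yet, 100 bytes total, 4 requests.
  int64_t bytes_received = 0;
  int64_t total_bytes = 100;
  int request_count = 4;

  int64_t bytes_left = total_bytes - bytes_received;             // 100
  int64_t slice_size =
      std::max<int64_t>(bytes_left / request_count, 1);          // 25
  int num_requests = static_cast<int>(bytes_left / slice_size);  // 4
  int64_t current_offset = bytes_received + slice_size;          // 25

  // The original request covers [0, 25); the loop forks the other three
  // ranges. The last fork also gets bytes_left % slice_size (0 here).
  for (int i = 0; i < num_requests - 1; ++i) {
    int64_t length = (i == num_requests - 2)
                         ? slice_size + (bytes_left % slice_size)
                         : slice_size;
    std::cout << "fork offset=" << current_offset << " length=" << length
              << "\n";  // prints 25/25, then 50/25, then 75/25
    current_offset += slice_size;
  }
}
```

With a 103-byte remainder instead, slice_size stays 25 and the last fork would get 25 + 3 = 28 bytes, so the three forks cover 25..49, 50..74, and 75..102 while the original request handles 0..24.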
| 76 void ParallelDownloadJob::BuildParallelRequests() { | 83 void ParallelDownloadJob::BuildParallelRequests() { |
| (...skipping 16 matching lines...) | |
| 93 // new requests. For the remaining slices, they will be handled once some | 100 // new requests. For the remaining slices, they will be handled once some |
| 94 // of the workers finish their job. | 101 // of the workers finish their job. |
| 95 } else { | 102 } else { |
| 96 // TODO(qinmin): Check the size of the last slice. If it is huge, we can | 103 // TODO(qinmin): Check the size of the last slice. If it is huge, we can |
| 97 // split it into N pieces and pass the last N-1 pieces to different workers. | 104 // split it into N pieces and pass the last N-1 pieces to different workers. |
| 98 // Otherwise, just fork |slices_to_download.size()| number of workers. | 105 // Otherwise, just fork |slices_to_download.size()| number of workers. |
| 99 } | 106 } |
| 100 } | 107 } |
| 101 | 108 |
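The TODO from qinmin above is not implemented in this CL. The sketch below is only one plausible reading of it, with made-up names and an arbitrary piece count: a single large trailing slice is divided so that the original request keeps the first piece and new workers take the remaining N-1.

```cpp
#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical helper: splits a slice starting at |offset| with |length|
// bytes into |n| pieces. The first piece would stay on the original request
// and the last n - 1 pieces would go to new workers; the final piece absorbs
// any remainder so the pieces exactly cover the slice.
std::vector<std::pair<int64_t, int64_t>> SplitSlice(int64_t offset,
                                                    int64_t length,
                                                    int n) {
  std::vector<std::pair<int64_t, int64_t>> pieces;  // {offset, length}
  int64_t base = length / n;
  for (int i = 0; i < n; ++i) {
    int64_t piece_length = (i == n - 1) ? length - base * i : base;
    pieces.emplace_back(offset + base * i, piece_length);
  }
  return pieces;
}

// SplitSlice(1000, 4003, 4) ->
//   {1000, 1000}, {2000, 1000}, {3000, 1000}, {4000, 1003}
```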
| 102 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { | 109 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { |
| 103 std::unique_ptr<DownloadWorker> worker = base::MakeUnique<DownloadWorker>(); | 110 std::unique_ptr<DownloadWorker> worker = |
| | 111 base::MakeUnique&lt;DownloadWorker&gt;(this, offset, length); |
| 104 | 112 |
| 105 DCHECK(download_item_); | 113 DCHECK(download_item_); |
| 106 StoragePartition* storage_partition = | 114 StoragePartition* storage_partition = |
| 107 BrowserContext::GetStoragePartitionForSite( | 115 BrowserContext::GetStoragePartitionForSite( |
| 108 download_item_->GetBrowserContext(), download_item_->GetSiteUrl()); | 116 download_item_->GetBrowserContext(), download_item_->GetSiteUrl()); |
| 109 | 117 |
| 110 std::unique_ptr<DownloadUrlParameters> download_params( | 118 std::unique_ptr<DownloadUrlParameters> download_params( |
| 111 new DownloadUrlParameters(download_item_->GetURL(), | 119 new DownloadUrlParameters(download_item_->GetURL(), |
| 112 storage_partition->GetURLRequestContext())); | 120 storage_partition->GetURLRequestContext())); |
| 113 download_params->set_file_path(download_item_->GetFullPath()); | 121 download_params->set_file_path(download_item_->GetFullPath()); |
| 114 download_params->set_last_modified(download_item_->GetLastModifiedTime()); | 122 download_params->set_last_modified(download_item_->GetLastModifiedTime()); |
| 115 download_params->set_etag(download_item_->GetETag()); | 123 download_params->set_etag(download_item_->GetETag()); |
| 116 download_params->set_offset(offset); | 124 download_params->set_offset(offset); |
| 117 | 125 |
| 118 // Setting the length will result in a range request to fetch a slice of the | 126 // Setting the length will result in a range request to fetch a slice of the |
| 119 // file. | 127 // file. |
| 120 download_params->set_length(length); | 128 download_params->set_length(length); |
| 121 | 129 |
| 122 // Subsequent range requests have the same referrer URL as the original | 130 // Subsequent range requests have the same referrer URL as the original |
| 123 // download request. | 131 // download request. |
| 124 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), | 132 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), |
| 125 blink::WebReferrerPolicyAlways)); | 133 blink::WebReferrerPolicyAlways)); |
| 126 // Send the request. | 134 // Send the request. |
| 127 worker->SendRequest(std::move(download_params)); | 135 worker->SendRequest(std::move(download_params)); |
| 128 workers_.push_back(std::move(worker)); | 136 workers_.push_back(std::move(worker)); |
| 129 } | 137 } |
| 130 | 138 |
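The offset/length pair set on the DownloadUrlParameters in CreateRequest above is what ultimately turns into a byte-range request for one slice. As an illustration of the byte math only (the real header construction happens in the network stack, not in this file), a range of |length| bytes starting at |offset| covers offset through offset + length - 1 inclusive:

```cpp
#include <cstdint>
#include <string>

// Illustrative helper, not part of the CL: formats the Range header that a
// slice request of |length| bytes starting at |offset| corresponds to.
std::string RangeHeaderFor(int64_t offset, int64_t length) {
  return "Range: bytes=" + std::to_string(offset) + "-" +
         std::to_string(offset + length - 1);
}

// RangeHeaderFor(75, 25) == "Range: bytes=75-99"
```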
| 131 } // namespace content | 139 } // namespace content |