OLD | NEW |
1 // Copyright 2017 The Chromium Authors. All rights reserved. | 1 // Copyright 2017 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/browser/download/parallel_download_job.h" | 5 #include "content/browser/download/parallel_download_job.h" |
6 | 6 |
7 #include "base/memory/ptr_util.h" | 7 #include "base/memory/ptr_util.h" |
8 #include "content/browser/download/download_create_info.h" | 8 #include "content/browser/download/download_create_info.h" |
9 #include "content/browser/download/parallel_download_utils.h" | 9 #include "content/browser/download/parallel_download_utils.h" |
10 #include "content/public/browser/browser_context.h" | 10 #include "content/public/browser/browser_context.h" |
11 #include "content/public/browser/storage_partition.h" | 11 #include "content/public/browser/storage_partition.h" |
12 | 12 |
13 namespace content { | 13 namespace content { |
14 namespace { | 14 namespace { |
15 | 15 |
16 const int kVerboseLevel = 1; | 16 const int kVerboseLevel = 1; |
17 | 17 |
18 } // namespace | 18 } // namespace |
19 | 19 |
20 ParallelDownloadJob::ParallelDownloadJob( | 20 ParallelDownloadJob::ParallelDownloadJob( |
21 DownloadItemImpl* download_item, | 21 DownloadItemImpl* download_item, |
22 std::unique_ptr<DownloadRequestHandleInterface> request_handle, | 22 std::unique_ptr<DownloadRequestHandleInterface> request_handle, |
23 const DownloadCreateInfo& create_info) | 23 const DownloadCreateInfo& create_info) |
24 : DownloadJobImpl(download_item, std::move(request_handle)), | 24 : DownloadJobImpl(download_item, std::move(request_handle)), |
25 initial_request_offset_(create_info.offset), | 25 initial_request_offset_(create_info.offset), |
| 26 initial_request_length_(create_info.length), |
26 content_length_(create_info.total_bytes), | 27 content_length_(create_info.total_bytes), |
27 requests_sent_(false), | 28 requests_sent_(false), |
28 is_canceled_(false) {} | 29 is_canceled_(false) {} |
29 | 30 |
30 ParallelDownloadJob::~ParallelDownloadJob() = default; | 31 ParallelDownloadJob::~ParallelDownloadJob() = default; |
31 | 32 |
32 void ParallelDownloadJob::Start() { | 33 void ParallelDownloadJob::Start() { |
33 DownloadJobImpl::Start(); | 34 DownloadJobImpl::Start(); |
34 | 35 |
35 BuildParallelRequestAfterDelay(); | 36 BuildParallelRequestAfterDelay(); |
(...skipping 36 matching lines...)
72 return; | 73 return; |
73 } | 74 } |
74 | 75 |
75 for (auto& worker : workers_) | 76 for (auto& worker : workers_) |
76 worker.second->Resume(); | 77 worker.second->Resume(); |
77 } | 78 } |
78 | 79 |
79 int ParallelDownloadJob::GetParallelRequestCount() const { | 80 int ParallelDownloadJob::GetParallelRequestCount() const { |
80 return GetParallelRequestCountConfig(); | 81 return GetParallelRequestCountConfig(); |
81 } | 82 } |
| 83 int64_t ParallelDownloadJob::GetMinSliceSize() const { |
| 84 return GetMinSliceSizeConfig(); |
| 85 } |
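
These getters wrap the Finch-driven config values. As a minimal sketch of how a test could pin them to deterministic values (assuming both getters are virtual; TestParallelDownloadJob is a hypothetical name, not part of this CL):

    // Hypothetical test double that makes slicing deterministic in tests.
    class TestParallelDownloadJob : public ParallelDownloadJob {
     public:
      using ParallelDownloadJob::ParallelDownloadJob;
      int GetParallelRequestCount() const override { return 4; }
      int64_t GetMinSliceSize() const override { return 1024 * 1024; }  // 1 MB.
    };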
82 | 86 |
83 bool ParallelDownloadJob::UsesParallelRequests() const { | 87 bool ParallelDownloadJob::UsesParallelRequests() const { |
84 return true; | 88 return true; |
85 } | 89 } |
86 | 90 |
87 void ParallelDownloadJob::BuildParallelRequestAfterDelay() { | 91 void ParallelDownloadJob::BuildParallelRequestAfterDelay() { |
88 DCHECK(workers_.empty()); | 92 DCHECK(workers_.empty()); |
89 DCHECK(!requests_sent_); | 93 DCHECK(!requests_sent_); |
90 DCHECK(!timer_.IsRunning()); | 94 DCHECK(!timer_.IsRunning()); |
91 | 95 |
(...skipping 30 matching lines...)
122 if (is_canceled_) | 126 if (is_canceled_) |
123 return; | 127 return; |
124 | 128 |
125 // TODO(qinmin): The size of |slices_to_download| should be no larger than | 129 // TODO(qinmin): The size of |slices_to_download| should be no larger than |
126 // |kParallelRequestCount| unless |kParallelRequestCount| is changed after | 130 // |kParallelRequestCount| unless |kParallelRequestCount| is changed after |
127 // a download is interrupted. This could happen if we use Finch to | 131 // a download is interrupted. This could happen if we use Finch to |
128 // configure the number of parallel requests. | 132 // configure the number of parallel requests. |
129 // Get the next |kParallelRequestCount - 1| slices and fork | 133 // Get the next |kParallelRequestCount - 1| slices and fork |
130 // new requests. For the remaining slices, they will be handled once some | 134 // new requests. For the remaining slices, they will be handled once some |
131 // of the workers finish their job. | 135 // of the workers finish their job. |
132 DownloadItem::ReceivedSlices slices_to_download; | 136 DownloadItem::ReceivedSlices slices_to_download = |
133 if (download_item_->GetReceivedSlices().empty()) { | 137 FindSlicesToDownload(download_item_->GetReceivedSlices()); |
134 slices_to_download = FindSlicesForRemainingContent( | 138 |
135 initial_request_offset_, content_length_, GetParallelRequestCount()); | 139 DCHECK(!slices_to_download.empty()); |
136 } else { | 140 int64_t first_slice_offset = slices_to_download[0].offset; |
| 141 |
| 142 if (initial_request_offset_ > first_slice_offset) { |
| 143 DVLOG(kVerboseLevel) << "Initial request is after the first slice to" |
| 144 " download."; |
| 145 } |
| 146 |
| 147 // Create more slices for a new download. The initial request may generate |
| 148 // a received slice. If there are holes before |initial_request_offset_|, |
| 149 // don't create more slices. |
| 150 if (slices_to_download.size() <= 1 && |
| 151 initial_request_length_ == DownloadSaveInfo::kLengthFullContent && |
| 152 initial_request_offset_ <= first_slice_offset) { |
137 // TODO(qinmin): Check the size of the last slice. If it is huge, we can | 153 // TODO(qinmin): Check the size of the last slice. If it is huge, we can |
138 // split it into N pieces and pass the last N-1 pieces to different workers. | 154 // split it into N pieces and pass the last N-1 pieces to different workers. |
139 // Otherwise, just fork |slices_to_download.size()| workers. | 155 // Otherwise, just fork |slices_to_download.size()| workers. |
140 slices_to_download = | 156 slices_to_download = FindSlicesForRemainingContent( |
141 FindSlicesToDownload(download_item_->GetReceivedSlices()); | 157 first_slice_offset, |
| 158 content_length_ - first_slice_offset + initial_request_offset_, |
| 159 GetParallelRequestCount(), GetMinSliceSize()); |
142 } | 160 } |
143 | 161 |
144 if (slices_to_download.empty()) | 162 DCHECK(!slices_to_download.empty()); |
145 return; | |
146 | |
147 DCHECK_EQ(slices_to_download[0].offset, initial_request_offset_); | |
148 DCHECK_EQ(slices_to_download.back().received_bytes, | 163 DCHECK_EQ(slices_to_download.back().received_bytes, |
149 DownloadSaveInfo::kLengthFullContent); | 164 DownloadSaveInfo::kLengthFullContent); |
150 | 165 |
151 // Send requests, not including the original request. |
152 ForkSubRequests(slices_to_download); | 166 ForkSubRequests(slices_to_download); |
153 | |
154 requests_sent_ = true; | 167 requests_sent_ = true; |
155 } | 168 } |
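
To make the new-download branch above concrete, a standalone sketch of the slicing arithmetic. SplitRemainingContent and its rounding are illustrative assumptions, not the actual FindSlicesForRemainingContent implementation:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Slice { int64_t offset; int64_t length; };  // length 0 == to EOF.

    std::vector<Slice> SplitRemainingContent(int64_t offset,
                                             int64_t bytes_available,
                                             int request_count,
                                             int64_t min_slice_size) {
      // Never create more slices than |min_slice_size| allows, and at least one.
      int64_t slice_count = std::min<int64_t>(
          request_count, std::max<int64_t>(1, bytes_available / min_slice_size));
      int64_t slice_size = bytes_available / slice_count;
      std::vector<Slice> slices;
      for (int64_t i = 0; i < slice_count; ++i) {
        // The last slice is open-ended (length 0) so it runs to end of file,
        // matching the DCHECK on |received_bytes| == kLengthFullContent above.
        slices.push_back(
            {offset + i * slice_size, i + 1 == slice_count ? 0 : slice_size});
      }
      return slices;
    }

For example, SplitRemainingContent(0, 100, 4, 10) yields {0, 25} {25, 25} {50, 25} {75, 0 /* to EOF */}.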
156 | 169 |
157 void ParallelDownloadJob::ForkSubRequests( | 170 void ParallelDownloadJob::ForkSubRequests( |
158 const DownloadItem::ReceivedSlices& slices_to_download) { | 171 const DownloadItem::ReceivedSlices& slices_to_download) { |
159 if (slices_to_download.size() < 2) | 172 bool initial_request_skipped = false; |
160 return; | 173 for (auto it = slices_to_download.begin(); it != slices_to_download.end(); |
| 174 ++it) { |
| 175 // Create requests for holes before the |initial_request_offset_|. |
| 176 if (it->offset < initial_request_offset_) { |
| 177 CreateRequest(it->offset, it->received_bytes); |
| 178 continue; |
| 179 } |
161 | 180 |
162 for (auto it = slices_to_download.begin() + 1; it != slices_to_download.end(); | 181 // Assume the first slice to download at or after |initial_request_offset_| |
163 ++it) { | 182 // will be handled by the initial request. |
164 // received_bytes here is the number of bytes to download. | 183 if (initial_request_skipped) |
165 CreateRequest(it->offset, it->received_bytes); | 184 CreateRequest(it->offset, it->received_bytes); |
| 185 else |
| 186 initial_request_skipped = true; |
166 } | 187 } |
167 } | 188 } |
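
A worked trace of the fork rule above, assuming |initial_request_offset_| == 10 and slices to download at offsets 0, 10, and 20:

    offset 0  -> CreateRequest(0, ...)    // hole before the initial request
    offset 10 -> skipped                  // served by the initial request itself
    offset 20 -> CreateRequest(20, ...)   // new parallel worker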
168 | 189 |
169 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { | 190 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { |
170 DCHECK(download_item_); | 191 DCHECK(download_item_); |
171 | 192 |
172 std::unique_ptr<DownloadWorker> worker = | 193 std::unique_ptr<DownloadWorker> worker = |
173 base::MakeUnique<DownloadWorker>(this, offset, length); | 194 base::MakeUnique<DownloadWorker>(this, offset, length); |
174 | 195 |
175 StoragePartition* storage_partition = | 196 StoragePartition* storage_partition = |
(...skipping 16 matching lines...)
192 // download request. | 213 // download request. |
193 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), | 214 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), |
194 blink::WebReferrerPolicyAlways)); | 215 blink::WebReferrerPolicyAlways)); |
195 // Send the request. | 216 // Send the request. |
196 worker->SendRequest(std::move(download_params)); | 217 worker->SendRequest(std::move(download_params)); |
197 DCHECK(workers_.find(offset) == workers_.end()); | 218 DCHECK(workers_.find(offset) == workers_.end()); |
198 workers_[offset] = std::move(worker); | 219 workers_[offset] = std::move(worker); |
199 } | 220 } |
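
Each worker's (offset, length) pair ultimately corresponds to an HTTP range request; the mapping below is standard Range-header semantics, with the actual plumbing through the download parameters elided in the skipped lines above:

    offset = 100, length = 50  ->  Range: bytes=100-149
    offset = 100, length = 0   ->  Range: bytes=100-      (open-ended, to EOF)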
200 | 221 |
201 } // namespace content | 222 } // namespace content |