OLD | NEW |
1 // Copyright 2017 The Chromium Authors. All rights reserved. | 1 // Copyright 2017 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/browser/download/parallel_download_job.h" | 5 #include "content/browser/download/parallel_download_job.h" |
6 | 6 |
7 #include "base/memory/ptr_util.h" | 7 #include "base/memory/ptr_util.h" |
8 #include "content/browser/download/download_create_info.h" | 8 #include "content/browser/download/download_create_info.h" |
| 9 #include "content/browser/download/download_stats.h" |
9 #include "content/browser/download/parallel_download_utils.h" | 10 #include "content/browser/download/parallel_download_utils.h" |
10 #include "content/public/browser/browser_context.h" | 11 #include "content/public/browser/browser_context.h" |
11 #include "content/public/browser/storage_partition.h" | 12 #include "content/public/browser/storage_partition.h" |
12 | 13 |
13 namespace content { | 14 namespace content { |
14 namespace { | 15 namespace { |
15 | 16 |
16 const int kVerboseLevel = 1; | 17 const int kVerboseLevel = 1; |
17 | 18 |
18 } // namespace | 19 } // namespace |
19 | 20 |
20 ParallelDownloadJob::ParallelDownloadJob( | 21 ParallelDownloadJob::ParallelDownloadJob( |
21 DownloadItemImpl* download_item, | 22 DownloadItemImpl* download_item, |
22 std::unique_ptr<DownloadRequestHandleInterface> request_handle, | 23 std::unique_ptr<DownloadRequestHandleInterface> request_handle, |
23 const DownloadCreateInfo& create_info) | 24 const DownloadCreateInfo& create_info) |
24 : DownloadJobImpl(download_item, std::move(request_handle)), | 25 : DownloadJobImpl(download_item, std::move(request_handle)), |
25 initial_request_offset_(create_info.offset), | 26 initial_request_offset_(create_info.offset), |
26 initial_request_length_(create_info.length), | |
27 content_length_(create_info.total_bytes), | 27 content_length_(create_info.total_bytes), |
28 requests_sent_(false), | 28 requests_sent_(false), |
29 is_canceled_(false) {} | 29 is_canceled_(false) {} |
30 | 30 |
31 ParallelDownloadJob::~ParallelDownloadJob() = default; | 31 ParallelDownloadJob::~ParallelDownloadJob() = default; |
32 | 32 |
33 void ParallelDownloadJob::Start() { | 33 void ParallelDownloadJob::Start() { |
34 DownloadJobImpl::Start(); | 34 DownloadJobImpl::Start(); |
35 | 35 |
36 BuildParallelRequestAfterDelay(); | 36 BuildParallelRequestAfterDelay(); |
(...skipping 58 matching lines...)
95 | 95 |
96 timer_.Start(FROM_HERE, GetParallelRequestDelayConfig(), this, | 96 timer_.Start(FROM_HERE, GetParallelRequestDelayConfig(), this, |
97 &ParallelDownloadJob::BuildParallelRequests); | 97 &ParallelDownloadJob::BuildParallelRequests); |
98 } | 98 } |
99 | 99 |
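
BuildParallelRequestAfterDelay() defers the fork through a member base::Timer, with GetParallelRequestDelayConfig() supplying the delay. A minimal standalone sketch of the same one-shot-timer pattern, where DelayedForker, the two-second delay, and OnDelayElapsed are hypothetical stand-ins for the real job, the configured delay, and BuildParallelRequests():

#include "base/location.h"
#include "base/time/time.h"
#include "base/timer/timer.h"

class DelayedForker {
 public:
  void Start() {
    // Fires once after the delay; the pending callback is dropped if
    // |timer_| is destroyed first, so no stale dispatch can occur.
    timer_.Start(FROM_HERE, base::TimeDelta::FromSeconds(2), this,
                 &DelayedForker::OnDelayElapsed);
  }

 private:
  void OnDelayElapsed() {
    // The real job forks its parallel requests here.
  }

  base::OneShotTimer timer_;
};
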
100 void ParallelDownloadJob::OnByteStreamReady( | 100 void ParallelDownloadJob::OnByteStreamReady( |
101 DownloadWorker* worker, | 101 DownloadWorker* worker, |
102 std::unique_ptr<ByteStreamReader> stream_reader) { | 102 std::unique_ptr<ByteStreamReader> stream_reader) { |
103 bool success = DownloadJob::AddByteStream(std::move(stream_reader), | 103 bool success = DownloadJob::AddByteStream(std::move(stream_reader), |
104 worker->offset(), worker->length()); | 104 worker->offset(), worker->length()); |
| 105 RecordParallelDownloadAddStreamSuccess(success); |
105 | 106 |
106 // Destroy the request if the sink is gone. | 107 // Destroy the request if the sink is gone. |
107 if (!success) { | 108 if (!success) { |
108 VLOG(kVerboseLevel) | 109 VLOG(kVerboseLevel) |
109 << "Byte stream arrived after download file is released."; | 110 << "Byte stream arrived after download file is released."; |
110 worker->Cancel(); | 111 worker->Cancel(); |
111 } | 112 } |
112 } | 113 } |
113 | 114 |
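
RecordParallelDownloadAddStreamSuccess() comes from the download_stats.h include added above; its body is outside this diff. A plausible sketch, assuming it is a plain boolean UMA histogram (the histogram name here is invented):

#include "base/metrics/histogram_macros.h"

void RecordParallelDownloadAddStreamSuccess(bool success) {
  // Hypothetical histogram name; the real one is defined in
  // download_stats.cc, which this diff does not show.
  UMA_HISTOGRAM_BOOLEAN("Download.ParallelDownload.AddStreamSuccess", success);
}
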
114 void ParallelDownloadJob::OnServerResponseError( | 115 void ParallelDownloadJob::OnServerResponseError( |
(...skipping 16 matching lines...)
131 // a download is interrupted. This could happen if we use Finch to configure | 132 // a download is interrupted. This could happen if we use Finch to configure |
132 // the number of parallel requests. | 133 // the number of parallel requests. |
133 // Get the next |kParallelRequestCount - 1| slices and fork | 134 // Get the next |kParallelRequestCount - 1| slices and fork |
134 // new requests. The remaining slices will be handled once some | 135 // new requests. The remaining slices will be handled once some |
135 // of the workers finish their jobs. | 136 // of the workers finish their jobs. |
136 DownloadItem::ReceivedSlices slices_to_download = | 137 DownloadItem::ReceivedSlices slices_to_download = |
137 FindSlicesToDownload(download_item_->GetReceivedSlices()); | 138 FindSlicesToDownload(download_item_->GetReceivedSlices()); |
138 | 139 |
139 DCHECK(!slices_to_download.empty()); | 140 DCHECK(!slices_to_download.empty()); |
140 int64_t first_slice_offset = slices_to_download[0].offset; | 141 int64_t first_slice_offset = slices_to_download[0].offset; |
141 | 142 DCHECK_LE(initial_request_offset_, first_slice_offset); |
142 if (initial_request_offset_ > first_slice_offset) { | |
143 DVLOG(kVerboseLevel) << "Initial request is after the first slice to" | |
144 " download."; | |
145 } | |
146 | 143 |
147 // Create more slices for a new download. The initial request may generate | 144 // Create more slices for a new download. The initial request may generate |
148 // a received slice. If there are holes before |initial_request_offset_|, | 145 // a received slice. |
149 // don't create more slices. | |
150 if (slices_to_download.size() <= 1 && | 146 if (slices_to_download.size() <= 1 && |
151 initial_request_length_ == DownloadSaveInfo::kLengthFullContent && | |
152 initial_request_offset_ <= first_slice_offset) { | 147 initial_request_offset_ <= first_slice_offset) { |
153 // TODO(qinmin): Check the size of the last slice. If it is huge, we can | 148 // TODO(qinmin): Check the size of the last slice. If it is huge, we can |
154 // split it into N pieces and pass the last N-1 pieces to different workers. | 149 // split it into N pieces and pass the last N-1 pieces to different workers. |
155 // Otherwise, just fork |slices_to_download.size()| number of workers. | 150 // Otherwise, just fork |slices_to_download.size()| number of workers. |
156 slices_to_download = FindSlicesForRemainingContent( | 151 slices_to_download = FindSlicesForRemainingContent( |
157 first_slice_offset, | 152 first_slice_offset, |
158 content_length_ - first_slice_offset + initial_request_offset_, | 153 content_length_ - first_slice_offset + initial_request_offset_, |
159 GetParallelRequestCount(), GetMinSliceSize()); | 154 GetParallelRequestCount(), GetMinSliceSize()); |
160 } | 155 } |
161 | 156 |
162 DCHECK(!slices_to_download.empty()); | 157 DCHECK(!slices_to_download.empty()); |
163 DCHECK_EQ(slices_to_download.back().received_bytes, | 158 DCHECK_EQ(slices_to_download.back().received_bytes, |
164 DownloadSaveInfo::kLengthFullContent); | 159 DownloadSaveInfo::kLengthFullContent); |
165 | 160 |
166 ForkSubRequests(slices_to_download); | 161 ForkSubRequests(slices_to_download); |
| 162 RecordParallelDownloadRequestCount( |
| 163 static_cast<int>(slices_to_download.size())); |
167 requests_sent_ = true; | 164 requests_sent_ = true; |
168 } | 165 } |
169 | 166 |
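
FindSlicesToDownload() and FindSlicesForRemainingContent() are declared in parallel_download_utils.h, and their bodies are outside this diff. A simplified sketch of what the second helper appears to do here: split the remaining bytes evenly across at most |request_count| slices of at least |min_slice_size| each, leaving the last slice open-ended (the DCHECK above implies its length is DownloadSaveInfo::kLengthFullContent):

#include <stdint.h>
#include <algorithm>
#include <vector>

struct Slice {
  int64_t offset;
  int64_t length;  // 0 means "to end of file" in this sketch.
};

// Simplified stand-in, not the real FindSlicesForRemainingContent().
std::vector<Slice> SplitRemainingContent(int64_t offset, int64_t bytes_left,
                                         int request_count,
                                         int64_t min_slice_size) {
  std::vector<Slice> slices;
  if (bytes_left <= 0 || request_count <= 0)
    return slices;
  // Never create more slices than the minimum slice size allows.
  int64_t count = std::min<int64_t>(
      request_count, std::max<int64_t>(1, bytes_left / min_slice_size));
  int64_t slice_size = bytes_left / count;
  for (int64_t i = 0; i < count - 1; ++i)
    slices.push_back({offset + i * slice_size, slice_size});
  // The last slice is open-ended and absorbs any rounding remainder.
  slices.push_back({offset + (count - 1) * slice_size, 0});
  return slices;
}

For example, 100 remaining bytes at offset 50 with request_count 3 and min_slice_size 10 yield {50, 33}, {83, 33}, and an open-ended slice at 116.
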
170 void ParallelDownloadJob::ForkSubRequests( | 167 void ParallelDownloadJob::ForkSubRequests( |
171 const DownloadItem::ReceivedSlices& slices_to_download) { | 168 const DownloadItem::ReceivedSlices& slices_to_download) { |
172 bool initial_request_skipped = false; | 169 if (slices_to_download.size() < 2) |
173 for (auto it = slices_to_download.begin(); it != slices_to_download.end(); | 170 return; |
| 171 |
| 172 // Assume the first slice to download will be handled by the initial request. |
| 173 for (auto it = slices_to_download.begin() + 1; it != slices_to_download.end(); |
174 ++it) { | 174 ++it) { |
175 // Create requests for holes before the |initial_request_offset_|. | 175 DCHECK_GE(it->offset, initial_request_offset_); |
176 if (it->offset < initial_request_offset_) { | 176 CreateRequest(it->offset, it->received_bytes); |
177 CreateRequest(it->offset, it->received_bytes); | |
178 continue; | |
179 } | |
180 | |
181 // Assume the first slice to download after |initial_request_offset_| will | |
182 // be handled by the initial request. | |
183 if (initial_request_skipped) | |
184 CreateRequest(it->offset, it->received_bytes); | |
185 else | |
186 initial_request_skipped = true; | |
187 } | 177 } |
188 } | 178 } |
189 | 179 |
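
A worked example of the rewritten ForkSubRequests(): the first slice is assumed to be covered by the initial request, which is already streaming, so workers are forked only for the later slices. The old branch that created requests for holes before |initial_request_offset_| is gone, since BuildParallelRequests() now DCHECKs that no slice starts before the initial offset.

// Hypothetical illustration (offsets and lengths invented):
//   slices_to_download = { {0, 1000}, {1000, 1000}, {2000, 0} }
// The initial request at offset 0 keeps filling the first slice, and
// ForkSubRequests() issues only:
//   CreateRequest(1000, 1000);
//   CreateRequest(2000, 0);  // Open-ended; reads to end of file.
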
190 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { | 180 void ParallelDownloadJob::CreateRequest(int64_t offset, int64_t length) { |
191 DCHECK(download_item_); | 181 DCHECK(download_item_); |
192 | 182 |
193 std::unique_ptr<DownloadWorker> worker = | 183 std::unique_ptr<DownloadWorker> worker = |
194 base::MakeUnique<DownloadWorker>(this, offset, length); | 184 base::MakeUnique<DownloadWorker>(this, offset, length); |
195 | 185 |
196 StoragePartition* storage_partition = | 186 StoragePartition* storage_partition = |
(...skipping 16 matching lines...)
213 // download request. | 203 // download request. |
214 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), | 204 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), |
215 blink::WebReferrerPolicyAlways)); | 205 blink::WebReferrerPolicyAlways)); |
216 // Send the request. | 206 // Send the request. |
217 worker->SendRequest(std::move(download_params)); | 207 worker->SendRequest(std::move(download_params)); |
218 DCHECK(workers_.find(offset) == workers_.end()); | 208 DCHECK(workers_.find(offset) == workers_.end()); |
219 workers_[offset] = std::move(worker); | 209 workers_[offset] = std::move(worker); |
220 } | 210 } |
221 | 211 |
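
CreateRequest() ends by storing the worker in |workers_|, keyed by its request offset; the DCHECK guards against forking the same offset twice, since a map assignment would silently destroy a live worker. A minimal standalone illustration of that invariant, with DownloadWorker stubbed out:

#include <stdint.h>
#include <cassert>
#include <map>
#include <memory>

struct DownloadWorker {};  // Stub; the real class drives one sub-request.

int main() {
  std::map<int64_t, std::unique_ptr<DownloadWorker>> workers;
  int64_t offset = 1000;
  assert(workers.find(offset) == workers.end());  // Mirrors the DCHECK.
  workers[offset] = std::make_unique<DownloadWorker>();
  return 0;
}
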
222 } // namespace content | 212 } // namespace content |