Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(664)

Side by Side Diff: content/browser/download/parallel_download_job.cc

Issue 2806923002: Don't create parallel request if download is about to complete (Closed)
Patch Set: Add UMA and Finch support Created 3 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2017 The Chromium Authors. All rights reserved. 1 // Copyright 2017 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/browser/download/parallel_download_job.h" 5 #include "content/browser/download/parallel_download_job.h"
6 6
7 #include <algorithm>
8
7 #include "base/memory/ptr_util.h" 9 #include "base/memory/ptr_util.h"
10 #include "base/metrics/histogram_macros.h"
11 #include "base/time/time.h"
8 #include "content/browser/download/download_create_info.h" 12 #include "content/browser/download/download_create_info.h"
9 #include "content/browser/download/download_stats.h" 13 #include "content/browser/download/download_stats.h"
10 #include "content/browser/download/parallel_download_utils.h" 14 #include "content/browser/download/parallel_download_utils.h"
11 #include "content/public/browser/browser_context.h" 15 #include "content/public/browser/browser_context.h"
12 #include "content/public/browser/storage_partition.h" 16 #include "content/public/browser/storage_partition.h"
13 17
14 namespace content { 18 namespace content {
15 namespace { 19 namespace {
16 20
17 const int kVerboseLevel = 1; 21 const int kVerboseLevel = 1;
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after
73 return; 77 return;
74 } 78 }
75 79
76 for (auto& worker : workers_) 80 for (auto& worker : workers_)
77 worker.second->Resume(); 81 worker.second->Resume();
78 } 82 }
79 83
80 int ParallelDownloadJob::GetParallelRequestCount() const { 84 int ParallelDownloadJob::GetParallelRequestCount() const {
81 return GetParallelRequestCountConfig(); 85 return GetParallelRequestCountConfig();
82 } 86 }
87
83 int64_t ParallelDownloadJob::GetMinSliceSize() const { 88 int64_t ParallelDownloadJob::GetMinSliceSize() const {
84 return GetMinSliceSizeConfig(); 89 return GetMinSliceSizeConfig();
85 } 90 }
86 91
92 int ParallelDownloadJob::GetMinRemainingTimeInSeconds() const {
93 return GetParallelRequestRemainingTimeConfig().InSeconds();
94 }
95
87 bool ParallelDownloadJob::UsesParallelRequests() const { 96 bool ParallelDownloadJob::UsesParallelRequests() const {
88 return true; 97 return true;
89 } 98 }
90 99
91 void ParallelDownloadJob::BuildParallelRequestAfterDelay() { 100 void ParallelDownloadJob::BuildParallelRequestAfterDelay() {
92 DCHECK(workers_.empty()); 101 DCHECK(workers_.empty());
93 DCHECK(!requests_sent_); 102 DCHECK(!requests_sent_);
94 DCHECK(!timer_.IsRunning()); 103 DCHECK(!timer_.IsRunning());
95 104
96 timer_.Start(FROM_HERE, GetParallelRequestDelayConfig(), this, 105 timer_.Start(FROM_HERE, GetParallelRequestDelayConfig(), this,
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
142 // of the workers finish their job. 151 // of the workers finish their job.
143 DownloadItem::ReceivedSlices slices_to_download = 152 DownloadItem::ReceivedSlices slices_to_download =
144 FindSlicesToDownload(download_item_->GetReceivedSlices()); 153 FindSlicesToDownload(download_item_->GetReceivedSlices());
145 154
146 DCHECK(!slices_to_download.empty()); 155 DCHECK(!slices_to_download.empty());
147 int64_t first_slice_offset = slices_to_download[0].offset; 156 int64_t first_slice_offset = slices_to_download[0].offset;
148 DCHECK_LE(initial_request_offset_, first_slice_offset); 157 DCHECK_LE(initial_request_offset_, first_slice_offset);
149 158
150 // Create more slices for a new download. The initial request may generate 159 // Create more slices for a new download. The initial request may generate
151 // a received slice. 160 // a received slice.
152 if (slices_to_download.size() <= 1 && 161 if (slices_to_download.size() <= 1 && download_item_->GetTotalBytes() > 0) {
153 initial_request_offset_ <= first_slice_offset) { 162 int64_t current_bytes_per_second =
154 // TODO(qinmin): Check the size of the last slice. If it is huge, we can 163 std::max(static_cast<int64_t>(1), download_item_->CurrentSpeed());
155 // split it into N pieces and pass the last N-1 pieces to different workers. 164 int64_t remaining_bytes =
156 // Otherwise, just fork |slices_to_download.size()| number of workers. 165 download_item_->GetTotalBytes() - download_item_->GetReceivedBytes();
157 slices_to_download = FindSlicesForRemainingContent( 166
158 first_slice_offset, 167 int64_t remaining_time = remaining_bytes / current_bytes_per_second;
159 content_length_ - first_slice_offset + initial_request_offset_, 168 UMA_HISTOGRAM_CUSTOM_COUNTS(
160 GetParallelRequestCount(), GetMinSliceSize()); 169 "Download.ParallelDownload.RemainingTimeWhenBuildingRequests",
170 remaining_time, 0, base::TimeDelta::FromDays(1).InSeconds(), 50);
171 if (remaining_bytes / current_bytes_per_second >
172 GetMinRemainingTimeInSeconds()) {
173 // TODO(qinmin): Check the size of the last slice. If it is huge, we can
174 // split it into N pieces and pass the last N-1 pieces to different
175 // workers. Otherwise, just fork |slices_to_download.size()| number of
176 // workers.
177 slices_to_download = FindSlicesForRemainingContent(
178 first_slice_offset,
179 content_length_ - first_slice_offset + initial_request_offset_,
180 GetParallelRequestCount(), GetMinSliceSize());
181 } else {
182 RecordParallelDownloadCreationEvent(
183 ParallelDownloadCreationEvent::FALLBACK_REASON_REMAINING_TIME);
184 }
161 } 185 }
162 186
163 DCHECK(!slices_to_download.empty()); 187 DCHECK(!slices_to_download.empty());
164 DCHECK_EQ(slices_to_download.back().received_bytes, 188 DCHECK_EQ(slices_to_download.back().received_bytes,
165 DownloadSaveInfo::kLengthFullContent); 189 DownloadSaveInfo::kLengthFullContent);
166 190
167 ForkSubRequests(slices_to_download); 191 ForkSubRequests(slices_to_download);
168 RecordParallelDownloadRequestCount( 192 RecordParallelDownloadRequestCount(
169 static_cast<int>(slices_to_download.size())); 193 static_cast<int>(slices_to_download.size()));
170 requests_sent_ = true; 194 requests_sent_ = true;
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
212 // download request. 236 // download request.
213 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(), 237 download_params->set_referrer(Referrer(download_item_->GetReferrerUrl(),
214 blink::kWebReferrerPolicyAlways)); 238 blink::kWebReferrerPolicyAlways));
215 // Send the request. 239 // Send the request.
216 worker->SendRequest(std::move(download_params)); 240 worker->SendRequest(std::move(download_params));
217 DCHECK(workers_.find(offset) == workers_.end()); 241 DCHECK(workers_.find(offset) == workers_.end());
218 workers_[offset] = std::move(worker); 242 workers_[offset] = std::move(worker);
219 } 243 }
220 244
221 } // namespace content 245 } // namespace content
OLDNEW
« no previous file with comments | « content/browser/download/parallel_download_job.h ('k') | content/browser/download/parallel_download_job_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698