OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/browser/download/download_file_impl.h" | 5 #include "content/browser/download/download_file_impl.h" |
6 | 6 |
7 #include <string> | 7 #include <string> |
8 #include <utility> | 8 #include <utility> |
9 | 9 |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
85 length_ = offset - offset_; | 85 length_ = offset - offset_; |
86 } | 86 } |
87 } | 87 } |
88 | 88 |
89 DownloadFileImpl::DownloadFileImpl( | 89 DownloadFileImpl::DownloadFileImpl( |
90 std::unique_ptr<DownloadSaveInfo> save_info, | 90 std::unique_ptr<DownloadSaveInfo> save_info, |
91 const base::FilePath& default_download_directory, | 91 const base::FilePath& default_download_directory, |
92 std::unique_ptr<ByteStreamReader> stream_reader, | 92 std::unique_ptr<ByteStreamReader> stream_reader, |
93 const std::vector<DownloadItem::ReceivedSlice>& received_slices, | 93 const std::vector<DownloadItem::ReceivedSlice>& received_slices, |
94 const net::NetLogWithSource& download_item_net_log, | 94 const net::NetLogWithSource& download_item_net_log, |
95 bool is_sparse_file, | |
96 base::WeakPtr<DownloadDestinationObserver> observer) | 95 base::WeakPtr<DownloadDestinationObserver> observer) |
97 : net_log_( | 96 : net_log_( |
98 net::NetLogWithSource::Make(download_item_net_log.net_log(), | 97 net::NetLogWithSource::Make(download_item_net_log.net_log(), |
99 net::NetLogSourceType::DOWNLOAD_FILE)), | 98 net::NetLogSourceType::DOWNLOAD_FILE)), |
100 file_(net_log_), | 99 file_(net_log_), |
101 save_info_(std::move(save_info)), | 100 save_info_(std::move(save_info)), |
102 default_download_directory_(default_download_directory), | 101 default_download_directory_(default_download_directory), |
103 is_sparse_file_(is_sparse_file), | |
104 bytes_seen_(0), | 102 bytes_seen_(0), |
105 num_active_streams_(0), | 103 num_active_streams_(0), |
106 record_stream_bandwidth_(true), | 104 record_stream_bandwidth_(true), |
107 bytes_seen_with_parallel_streams_(0), | 105 bytes_seen_with_parallel_streams_(0), |
108 bytes_seen_without_parallel_streams_(0), | 106 bytes_seen_without_parallel_streams_(0), |
109 received_slices_(received_slices), | 107 received_slices_(received_slices), |
110 observer_(observer), | 108 observer_(observer), |
111 weak_factory_(this) { | 109 weak_factory_(this) { |
112 source_streams_[save_info_->offset] = base::MakeUnique<SourceStream>( | 110 source_streams_[save_info_->offset] = base::MakeUnique<SourceStream>( |
113 save_info_->offset, save_info_->length, std::move(stream_reader)); | 111 save_info_->offset, save_info_->length, std::move(stream_reader)); |
114 | 112 |
115 download_item_net_log.AddEvent( | 113 download_item_net_log.AddEvent( |
116 net::NetLogEventType::DOWNLOAD_FILE_CREATED, | 114 net::NetLogEventType::DOWNLOAD_FILE_CREATED, |
117 net_log_.source().ToEventParametersCallback()); | 115 net_log_.source().ToEventParametersCallback()); |
118 net_log_.BeginEvent( | 116 net_log_.BeginEvent( |
119 net::NetLogEventType::DOWNLOAD_FILE_ACTIVE, | 117 net::NetLogEventType::DOWNLOAD_FILE_ACTIVE, |
120 download_item_net_log.source().ToEventParametersCallback()); | 118 download_item_net_log.source().ToEventParametersCallback()); |
121 } | 119 } |
122 | 120 |
123 DownloadFileImpl::~DownloadFileImpl() { | 121 DownloadFileImpl::~DownloadFileImpl() { |
124 DCHECK_CURRENTLY_ON(BrowserThread::FILE); | 122 DCHECK_CURRENTLY_ON(BrowserThread::FILE); |
125 net_log_.EndEvent(net::NetLogEventType::DOWNLOAD_FILE_ACTIVE); | 123 net_log_.EndEvent(net::NetLogEventType::DOWNLOAD_FILE_ACTIVE); |
126 } | 124 } |
127 | 125 |
128 void DownloadFileImpl::Initialize(const InitializeCallback& callback) { | 126 void DownloadFileImpl::Initialize(const InitializeCallback& callback) { |
129 DCHECK_CURRENTLY_ON(BrowserThread::FILE); | 127 DCHECK_CURRENTLY_ON(BrowserThread::FILE); |
130 | 128 |
131 update_timer_.reset(new base::RepeatingTimer()); | 129 update_timer_.reset(new base::RepeatingTimer()); |
132 int64_t bytes_so_far = 0; | 130 int64_t bytes_so_far = 0; |
133 if (is_sparse_file_) { | 131 if (IsSparseFile()) { |
134 for (const auto& received_slice : received_slices_) { | 132 for (const auto& received_slice : received_slices_) { |
135 bytes_so_far += received_slice.received_bytes; | 133 bytes_so_far += received_slice.received_bytes; |
136 } | 134 } |
137 } else { | 135 } else { |
138 bytes_so_far = save_info_->offset; | 136 bytes_so_far = save_info_->offset; |
139 } | 137 } |
140 DownloadInterruptReason result = | 138 DownloadInterruptReason result = file_.Initialize( |
141 file_.Initialize(save_info_->file_path, default_download_directory_, | 139 save_info_->file_path, default_download_directory_, |
142 std::move(save_info_->file), bytes_so_far, | 140 std::move(save_info_->file), bytes_so_far, |
143 save_info_->hash_of_partial_file, | 141 save_info_->hash_of_partial_file, std::move(save_info_->hash_state), |
144 std::move(save_info_->hash_state), is_sparse_file_); | 142 IsSparseFile()); |
145 if (result != DOWNLOAD_INTERRUPT_REASON_NONE) { | 143 if (result != DOWNLOAD_INTERRUPT_REASON_NONE) { |
146 BrowserThread::PostTask( | 144 BrowserThread::PostTask( |
147 BrowserThread::UI, FROM_HERE, base::Bind(callback, result)); | 145 BrowserThread::UI, FROM_HERE, base::Bind(callback, result)); |
148 return; | 146 return; |
149 } | 147 } |
150 | 148 |
151 download_start_ = base::TimeTicks::Now(); | 149 download_start_ = base::TimeTicks::Now(); |
152 last_update_time_ = download_start_; | 150 last_update_time_ = download_start_; |
153 | 151 |
154 // Primarily to make reset to zero in restart visible to owner. | 152 // Primarily to make reset to zero in restart visible to owner. |
(...skipping 10 matching lines...) Expand all Loading... | |
165 | 163 |
166 void DownloadFileImpl::AddByteStream( | 164 void DownloadFileImpl::AddByteStream( |
167 std::unique_ptr<ByteStreamReader> stream_reader, | 165 std::unique_ptr<ByteStreamReader> stream_reader, |
168 int64_t offset, | 166 int64_t offset, |
169 int64_t length) { | 167 int64_t length) { |
170 DCHECK_CURRENTLY_ON(BrowserThread::FILE); | 168 DCHECK_CURRENTLY_ON(BrowserThread::FILE); |
171 | 169 |
172 source_streams_[offset] = | 170 source_streams_[offset] = |
173 base::MakeUnique<SourceStream>(offset, length, std::move(stream_reader)); | 171 base::MakeUnique<SourceStream>(offset, length, std::move(stream_reader)); |
174 | 172 |
173 // There are writers at different offsets now, create the received slices | |
174 // vector if necessary. | |
175 if (received_slices_.empty() && TotalBytesReceived() > 0) { | |
176 size_t index = AddOrMergeReceivedSliceIntoSortedArray( | |
177 DownloadItem::ReceivedSlice(0, TotalBytesReceived()), received_slices_); | |
178 DCHECK_EQ(index, 0u); | |
179 } | |
175 // If the file is initialized, start to write data, or wait until file opened. | 180 // If the file is initialized, start to write data, or wait until file opened. |
176 if (file_.in_progress()) | 181 if (file_.in_progress()) |
177 RegisterAndActivateStream(source_streams_[offset].get()); | 182 RegisterAndActivateStream(source_streams_[offset].get()); |
178 } | 183 } |
179 | 184 |
180 DownloadInterruptReason DownloadFileImpl::WriteDataToFile(int64_t offset, | 185 DownloadInterruptReason DownloadFileImpl::WriteDataToFile(int64_t offset, |
181 const char* data, | 186 const char* data, |
182 size_t data_len) { | 187 size_t data_len) { |
183 DCHECK_CURRENTLY_ON(BrowserThread::FILE); | 188 DCHECK_CURRENTLY_ON(BrowserThread::FILE); |
184 WillWriteToDisk(data_len); | 189 WillWriteToDisk(data_len); |
(...skipping 198 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
383 DCHECK_GE(incoming_data_size, bytes_to_write); | 388 DCHECK_GE(incoming_data_size, bytes_to_write); |
384 reason = WriteDataToFile( | 389 reason = WriteDataToFile( |
385 source_stream->offset() + source_stream->bytes_written(), | 390 source_stream->offset() + source_stream->bytes_written(), |
386 incoming_data.get()->data(), bytes_to_write); | 391 incoming_data.get()->data(), bytes_to_write); |
387 disk_writes_time_ += (base::TimeTicks::Now() - write_start); | 392 disk_writes_time_ += (base::TimeTicks::Now() - write_start); |
388 bytes_seen_ += bytes_to_write; | 393 bytes_seen_ += bytes_to_write; |
389 total_incoming_data_size += bytes_to_write; | 394 total_incoming_data_size += bytes_to_write; |
390 if (reason == DOWNLOAD_INTERRUPT_REASON_NONE) { | 395 if (reason == DOWNLOAD_INTERRUPT_REASON_NONE) { |
391 int64_t prev_bytes_written = source_stream->bytes_written(); | 396 int64_t prev_bytes_written = source_stream->bytes_written(); |
392 source_stream->OnWriteBytesToDisk(bytes_to_write); | 397 source_stream->OnWriteBytesToDisk(bytes_to_write); |
393 if (!is_sparse_file_) | 398 if (!IsSparseFile()) |
394 break; | 399 break; |
395 // If the write operation creates a new slice, add it to the | 400 // If the write operation creates a new slice, add it to the |
396 // |received_slices_| and update all the entries in | 401 // |received_slices_| and update all the entries in |
397 // |source_streams_|. | 402 // |source_streams_|. |
398 if (bytes_to_write > 0 && prev_bytes_written == 0) { | 403 if (bytes_to_write > 0 && prev_bytes_written == 0) { |
399 AddNewSlice(source_stream->offset(), bytes_to_write); | 404 AddNewSlice(source_stream->offset(), bytes_to_write); |
xingliu
2017/03/29 20:14:02
nit%: I might not fully understand the logic here
qinmin
2017/03/29 20:44:08
Shouldn't AddByteStream only called on parallel re
xingliu
2017/03/29 21:06:28
Yeah, I mean (bytes_to_write > 0 && prev_bytes_wri
| |
400 } else { | 405 } else { |
401 received_slices_[source_stream->index()].received_bytes += | 406 received_slices_[source_stream->index()].received_bytes += |
402 bytes_to_write; | 407 bytes_to_write; |
403 } | 408 } |
404 } | 409 } |
405 } | 410 } |
406 break; | 411 break; |
407 case ByteStreamReader::STREAM_COMPLETE: | 412 case ByteStreamReader::STREAM_COMPLETE: |
408 { | 413 { |
409 reason = static_cast<DownloadInterruptReason>( | 414 reason = static_cast<DownloadInterruptReason>( |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
443 source_stream->set_finished(true); | 448 source_stream->set_finished(true); |
444 num_active_streams_--; | 449 num_active_streams_--; |
445 | 450 |
446 // Inform observers. | 451 // Inform observers. |
447 SendUpdate(); | 452 SendUpdate(); |
448 | 453 |
449 // All the stream reader are completed, shut down file IO processing. | 454 // All the stream reader are completed, shut down file IO processing. |
450 if (IsDownloadCompleted()) { | 455 if (IsDownloadCompleted()) { |
451 RecordFileBandwidth(bytes_seen_, disk_writes_time_, | 456 RecordFileBandwidth(bytes_seen_, disk_writes_time_, |
452 base::TimeTicks::Now() - download_start_); | 457 base::TimeTicks::Now() - download_start_); |
453 if (is_sparse_file_ && record_stream_bandwidth_) { | 458 if (IsSparseFile() && record_stream_bandwidth_) { |
454 RecordParallelDownloadStats(bytes_seen_with_parallel_streams_, | 459 RecordParallelDownloadStats(bytes_seen_with_parallel_streams_, |
455 download_time_with_parallel_streams_, | 460 download_time_with_parallel_streams_, |
456 bytes_seen_without_parallel_streams_, | 461 bytes_seen_without_parallel_streams_, |
457 download_time_without_parallel_streams_); | 462 download_time_without_parallel_streams_); |
458 } | 463 } |
459 weak_factory_.InvalidateWeakPtrs(); | 464 weak_factory_.InvalidateWeakPtrs(); |
460 std::unique_ptr<crypto::SecureHash> hash_state = file_.Finish(); | 465 std::unique_ptr<crypto::SecureHash> hash_state = file_.Finish(); |
461 update_timer_.reset(); | 466 update_timer_.reset(); |
462 BrowserThread::PostTask( | 467 BrowserThread::PostTask( |
463 BrowserThread::UI, FROM_HERE, | 468 BrowserThread::UI, FROM_HERE, |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
504 received_slices_)); | 509 received_slices_)); |
505 } | 510 } |
506 | 511 |
507 void DownloadFileImpl::WillWriteToDisk(size_t data_len) { | 512 void DownloadFileImpl::WillWriteToDisk(size_t data_len) { |
508 if (!update_timer_->IsRunning()) { | 513 if (!update_timer_->IsRunning()) { |
509 update_timer_->Start(FROM_HERE, | 514 update_timer_->Start(FROM_HERE, |
510 base::TimeDelta::FromMilliseconds(kUpdatePeriodMs), | 515 base::TimeDelta::FromMilliseconds(kUpdatePeriodMs), |
511 this, &DownloadFileImpl::SendUpdate); | 516 this, &DownloadFileImpl::SendUpdate); |
512 } | 517 } |
513 rate_estimator_.Increment(data_len); | 518 rate_estimator_.Increment(data_len); |
514 if (is_sparse_file_) { | 519 base::TimeTicks now = base::TimeTicks::Now(); |
515 base::TimeTicks now = base::TimeTicks::Now(); | 520 base::TimeDelta time_elapsed = (now - last_update_time_); |
516 base::TimeDelta time_elapsed = (now - last_update_time_); | 521 last_update_time_ = now; |
517 last_update_time_ = now; | 522 if (num_active_streams_ > 1) { |
518 if (num_active_streams_ > 1) { | 523 download_time_with_parallel_streams_ += time_elapsed; |
519 download_time_with_parallel_streams_ += time_elapsed; | 524 bytes_seen_with_parallel_streams_ += data_len; |
520 bytes_seen_with_parallel_streams_ += data_len; | 525 } else { |
521 } else { | 526 download_time_without_parallel_streams_ += time_elapsed; |
522 download_time_without_parallel_streams_ += time_elapsed; | 527 bytes_seen_without_parallel_streams_ += data_len; |
523 bytes_seen_without_parallel_streams_ += data_len; | |
524 } | |
525 } | 528 } |
526 } | 529 } |
527 | 530 |
528 void DownloadFileImpl::AddNewSlice(int64_t offset, int64_t length) { | 531 void DownloadFileImpl::AddNewSlice(int64_t offset, int64_t length) { |
529 if (!is_sparse_file_) | |
530 return; | |
531 size_t index = AddOrMergeReceivedSliceIntoSortedArray( | 532 size_t index = AddOrMergeReceivedSliceIntoSortedArray( |
532 DownloadItem::ReceivedSlice(offset, length), received_slices_); | 533 DownloadItem::ReceivedSlice(offset, length), received_slices_); |
533 // Check if the slice is added as a new slice, or merged with an existing one. | 534 // Check if the slice is added as a new slice, or merged with an existing one. |
534 bool slice_added = (offset == received_slices_[index].offset); | 535 bool slice_added = (offset == received_slices_[index].offset); |
535 // Update the index of exising SourceStreams. | 536 // Update the index of exising SourceStreams. |
536 for (auto& stream : source_streams_) { | 537 for (auto& stream : source_streams_) { |
537 SourceStream* source_stream = stream.second.get(); | 538 SourceStream* source_stream = stream.second.get(); |
538 if (source_stream->offset() > offset) { | 539 if (source_stream->offset() > offset) { |
539 if (slice_added && source_stream->bytes_written() > 0) | 540 if (slice_added && source_stream->bytes_written() > 0) |
540 source_stream->set_index(source_stream->index() + 1); | 541 source_stream->set_index(source_stream->index() + 1); |
(...skipping 12 matching lines...) Expand all Loading... | |
553 SourceStream* source_stream = stream.second.get(); | 554 SourceStream* source_stream = stream.second.get(); |
554 if (source_stream->offset() >= last_slice_offset && | 555 if (source_stream->offset() >= last_slice_offset && |
555 source_stream->bytes_written() > 0) { | 556 source_stream->bytes_written() > 0) { |
556 stream_for_last_slice = source_stream; | 557 stream_for_last_slice = source_stream; |
557 last_slice_offset = source_stream->offset(); | 558 last_slice_offset = source_stream->offset(); |
558 } | 559 } |
559 if (!source_stream->is_finished()) | 560 if (!source_stream->is_finished()) |
560 return false; | 561 return false; |
561 } | 562 } |
562 | 563 |
563 if (!is_sparse_file_) | 564 if (!IsSparseFile()) |
564 return true; | 565 return true; |
565 | 566 |
566 // Verify that all the file slices have been downloaded. | 567 // Verify that all the file slices have been downloaded. |
567 std::vector<DownloadItem::ReceivedSlice> slices_to_download = | 568 std::vector<DownloadItem::ReceivedSlice> slices_to_download = |
568 FindSlicesToDownload(received_slices_); | 569 FindSlicesToDownload(received_slices_); |
569 if (slices_to_download.size() > 1) { | 570 if (slices_to_download.size() > 1) { |
570 // If there are 1 or more holes in the file, download is not finished. | 571 // If there are 1 or more holes in the file, download is not finished. |
571 // Some streams might not have been added to |source_streams_| yet. | 572 // Some streams might not have been added to |source_streams_| yet. |
572 return false; | 573 return false; |
573 } | 574 } |
(...skipping 16 matching lines...) Expand all Loading... | |
590 | 591 |
591 void DownloadFileImpl::HandleStreamError(SourceStream* source_stream, | 592 void DownloadFileImpl::HandleStreamError(SourceStream* source_stream, |
592 DownloadInterruptReason reason) { | 593 DownloadInterruptReason reason) { |
593 DCHECK_CURRENTLY_ON(BrowserThread::FILE); | 594 DCHECK_CURRENTLY_ON(BrowserThread::FILE); |
594 source_stream->stream_reader()->RegisterCallback(base::Closure()); | 595 source_stream->stream_reader()->RegisterCallback(base::Closure()); |
595 source_stream->set_finished(true); | 596 source_stream->set_finished(true); |
596 num_active_streams_--; | 597 num_active_streams_--; |
597 | 598 |
598 bool can_recover_from_error = false; | 599 bool can_recover_from_error = false; |
599 | 600 |
600 if (is_sparse_file_ && source_stream->length() != kNoBytesToWrite) { | 601 if (IsSparseFile() && source_stream->length() != kNoBytesToWrite) { |
601 // If a neighboring stream request is available, check if it can help | 602 // If a neighboring stream request is available, check if it can help |
602 // download all the data left by |source_stream| or has already done so. We | 603 // download all the data left by |source_stream| or has already done so. We |
603 // want to avoid the situation that a server always fails additional requests | 604 // want to avoid the situation that a server always fails additional requests |
604 // from the client thus causing the initial request and the download going | 605 // from the client thus causing the initial request and the download going |
605 // nowhere. | 606 // nowhere. |
606 // TODO(qinmin): make all streams half open so that they can recover | 607 // TODO(qinmin): make all streams half open so that they can recover |
607 // failures from their neighbors. | 608 // failures from their neighbors. |
608 SourceStream* preceding_neighbor = FindPrecedingNeighbor(source_stream); | 609 SourceStream* preceding_neighbor = FindPrecedingNeighbor(source_stream); |
609 while (preceding_neighbor) { | 610 while (preceding_neighbor) { |
610 int64_t upper_range = source_stream->offset() + source_stream->length(); | 611 int64_t upper_range = source_stream->offset() + source_stream->length(); |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
649 // Our observer will clean us up. | 650 // Our observer will clean us up. |
650 weak_factory_.InvalidateWeakPtrs(); | 651 weak_factory_.InvalidateWeakPtrs(); |
651 std::unique_ptr<crypto::SecureHash> hash_state = file_.Finish(); | 652 std::unique_ptr<crypto::SecureHash> hash_state = file_.Finish(); |
652 BrowserThread::PostTask( | 653 BrowserThread::PostTask( |
653 BrowserThread::UI, FROM_HERE, | 654 BrowserThread::UI, FROM_HERE, |
654 base::Bind(&DownloadDestinationObserver::DestinationError, observer_, | 655 base::Bind(&DownloadDestinationObserver::DestinationError, observer_, |
655 reason, TotalBytesReceived(), base::Passed(&hash_state))); | 656 reason, TotalBytesReceived(), base::Passed(&hash_state))); |
656 } | 657 } |
657 } | 658 } |
658 | 659 |
660 bool DownloadFileImpl::IsSparseFile() const { | |
661 return source_streams_.size() > 1 || !received_slices_.empty(); | |
662 } | |
663 | |
659 DownloadFileImpl::SourceStream* DownloadFileImpl::FindPrecedingNeighbor( | 664 DownloadFileImpl::SourceStream* DownloadFileImpl::FindPrecedingNeighbor( |
660 SourceStream* source_stream) { | 665 SourceStream* source_stream) { |
661 int64_t max_preceding_offset = 0; | 666 int64_t max_preceding_offset = 0; |
662 SourceStream* ret = nullptr; | 667 SourceStream* ret = nullptr; |
663 for (auto& stream : source_streams_) { | 668 for (auto& stream : source_streams_) { |
664 int64_t offset = stream.second->offset(); | 669 int64_t offset = stream.second->offset(); |
665 if (offset < source_stream->offset() && offset >= max_preceding_offset) { | 670 if (offset < source_stream->offset() && offset >= max_preceding_offset) { |
666 ret = stream.second.get(); | 671 ret = stream.second.get(); |
667 max_preceding_offset = offset; | 672 max_preceding_offset = offset; |
668 } | 673 } |
(...skipping 19 matching lines...) Expand all Loading... | |
688 const base::FilePath& new_path, | 693 const base::FilePath& new_path, |
689 const RenameCompletionCallback& completion_callback) | 694 const RenameCompletionCallback& completion_callback) |
690 : option(option), | 695 : option(option), |
691 new_path(new_path), | 696 new_path(new_path), |
692 retries_left(kMaxRenameRetries), | 697 retries_left(kMaxRenameRetries), |
693 completion_callback(completion_callback) {} | 698 completion_callback(completion_callback) {} |
694 | 699 |
695 DownloadFileImpl::RenameParameters::~RenameParameters() {} | 700 DownloadFileImpl::RenameParameters::~RenameParameters() {} |
696 | 701 |
697 } // namespace content | 702 } // namespace content |
OLD | NEW |