Index: content/browser/download/base_file.cc |
diff --git a/content/browser/download/base_file.cc b/content/browser/download/base_file.cc |
index 9d55828f5ecbe0e73b9138239899601c7a0fed07..94664c0ad05b06dd16d8754d64b53a317ac131e3 100644 |
--- a/content/browser/download/base_file.cc |
+++ b/content/browser/download/base_file.cc |
@@ -296,6 +296,19 @@ DownloadInterruptReason BaseFile::Open() { |
file_stream_->SetBoundNetLogSource(bound_net_log_); |
} |
+ int64 file_size = file_stream_->SeekSync(net::FROM_END, 0); |
asanka
2012/12/28 22:01:42
Wouldn't we need to slurp in the contents of the file? [comment truncated in source]
Randy Smith (Not in Mondays)
2012/12/29 17:16:16
Depending on the nature of the filesystem and the
asanka
2013/01/04 22:54:16
Ok by me. Can you add a TODO here?
|
+ if (file_size > bytes_so_far_) { |
+ // The file is larger than we expected. |
+ // This is OK, as long as we don't use the extra. |
+ // Truncate the file. |
+ int64 truncate_result = file_stream_->Truncate(bytes_so_far_); |
+ DCHECK_EQ(bytes_so_far_, truncate_result); |
asanka
2012/12/28 22:01:42
This could mean an error. We should handle it instead. [comment truncated in source]
Randy Smith (Not in Mondays)
2012/12/29 17:16:16
Done (though the DCHECK's still there if it's not
|
+ } else if (file_size < bytes_so_far_) { |
+ // The file is shorter than we expected. Our hashes won't be valid. |
+ return LogInterruptReason("Unable to seek to last written point", 0, |
+ DOWNLOAD_INTERRUPT_REASON_FILE_TOO_SHORT); |
+ } |
+ |
return DOWNLOAD_INTERRUPT_REASON_NONE; |
} |