Index: content/browser/download/base_file.cc
diff --git a/content/browser/download/base_file.cc b/content/browser/download/base_file.cc
index 59dfa839cd51dca2373f2b6204d5a64c418fb41c..a24a3d78a96d14e0534ca7ed966ba9a6ea8ee8fa 100644
--- a/content/browser/download/base_file.cc
+++ b/content/browser/download/base_file.cc
@@ -42,9 +42,10 @@ DownloadInterruptReason BaseFile::Initialize(
     const base::FilePath& full_path,
     const base::FilePath& default_directory,
     base::File file,
-    int64_t bytes_so_far,
+    int64_t offset,
     const std::string& hash_so_far,
-    std::unique_ptr<crypto::SecureHash> hash_state) {
+    std::unique_ptr<crypto::SecureHash> hash_state,
+    AccessMode access_mode) {
   DCHECK_CURRENTLY_ON(BrowserThread::FILE);
   DCHECK(!detached_);
 
@@ -68,15 +69,18 @@ DownloadInterruptReason BaseFile::Initialize(
     full_path_ = full_path;
   }
 
-  bytes_so_far_ = bytes_so_far;
+  access_mode_ = access_mode;
+  offset_ = offset;
+  if (access_mode_ == EXCLUSIVE)
+    bytes_so_far_ = offset;
   secure_hash_ = std::move(hash_state);
   file_ = std::move(file);
 
   return Open(hash_so_far);
 }
 
-DownloadInterruptReason BaseFile::AppendDataToFile(const char* data,
-                                                   size_t data_len) {
+DownloadInterruptReason BaseFile::WriteDataToFile(const char* data,
+                                                  size_t data_len) {
   DCHECK_CURRENTLY_ON(BrowserThread::FILE);
   DCHECK(!detached_);
 
@@ -112,6 +116,7 @@ DownloadInterruptReason BaseFile::AppendDataToFile(const char* data,
     DCHECK_LE(write_size, len);
     len -= write_size;
     current_data += write_size;
+    offset_ += write_size;
     bytes_so_far_ += write_size;
   }
   net_log_.EndEvent(net::NetLogEventType::DOWNLOAD_FILE_WRITTEN,
@@ -195,17 +200,18 @@ std::string BaseFile::DebugString() const {
       "{ "
       " full_path_ = \"%" PRFilePath
       "\""
-      " bytes_so_far_ = %" PRId64 " detached_ = %c }",
-      full_path_.value().c_str(),
-      bytes_so_far_,
-      detached_ ? 'T' : 'F');
+      " offset_ = %" PRId64 " detached_ = %c }",
+      full_path_.value().c_str(), offset_, detached_ ? 'T' : 'F');
 }
 
 DownloadInterruptReason BaseFile::CalculatePartialHash(
     const std::string& hash_to_expect) {
   secure_hash_ = crypto::SecureHash::Create(crypto::SecureHash::SHA256);
 
-  if (bytes_so_far_ == 0)
+  if (access_mode_ == SHARED)
+    return DOWNLOAD_INTERRUPT_REASON_NONE;
+
+  if (offset_ == 0)
     return DOWNLOAD_INTERRUPT_REASON_NONE;
 
   if (file_.Seek(base::File::FROM_BEGIN, 0) != 0)
@@ -220,17 +226,16 @@ DownloadInterruptReason BaseFile::CalculatePartialHash(
   // The size of the buffer is:
   // - at least kMinBufferSize so that we can use it to hold the hash as well.
   // - at most kMaxBufferSize so that there's a reasonable bound.
-  // - not larger than |bytes_so_far_| unless bytes_so_far_ is less than the
-  //   hash size.
+  // - not larger than |offset_| unless |offset_| is less than the hash size.
   std::vector<char> buffer(std::max<int64_t>(
-      kMinBufferSize, std::min<int64_t>(kMaxBufferSize, bytes_so_far_)));
+      kMinBufferSize, std::min<int64_t>(kMaxBufferSize, offset_)));
 
   int64_t current_position = 0;
-  while (current_position < bytes_so_far_) {
+  while (current_position < offset_) {
     // While std::min needs to work with int64_t, the result is always at most
     // kMaxBufferSize, which fits on an int.
     int bytes_to_read =
-        std::min<int64_t>(buffer.size(), bytes_so_far_ - current_position);
+        std::min<int64_t>(buffer.size(), offset_ - current_position);
     int length = file_.ReadAtCurrentPos(&buffer.front(), bytes_to_read);
     if (length == -1) {
       return LogInterruptReason("Reading partial file",
@@ -245,7 +250,7 @@ DownloadInterruptReason BaseFile::CalculatePartialHash(
     current_position += length;
   }
 
-  if (current_position != bytes_so_far_) {
+  if (current_position != offset_) {
     return LogInterruptReason(
         "Verifying prefix hash", 0, DOWNLOAD_INTERRUPT_REASON_FILE_TOO_SHORT);
   }
@@ -286,7 +291,16 @@ DownloadInterruptReason BaseFile::Open(const std::string& hash_so_far) {
 
   net_log_.BeginEvent(
       net::NetLogEventType::DOWNLOAD_FILE_OPENED,
-      base::Bind(&FileOpenedNetLogCallback, &full_path_, bytes_so_far_));
+      base::Bind(&FileOpenedNetLogCallback, &full_path_, offset_));
+
+  if (access_mode_ == SHARED) {
+    if (file_.Seek(base::File::FROM_BEGIN, offset_) < 0) {
+      logging::SystemErrorCode error = logging::GetLastSystemErrorCode();
+      ClearFile();
+      return LogSystemError("Seeking to end", error);
+    }
+    return DOWNLOAD_INTERRUPT_REASON_NONE;
+  }
 
   if (!secure_hash_) {
     DownloadInterruptReason reason = CalculatePartialHash(hash_so_far);
@@ -301,17 +315,17 @@ DownloadInterruptReason BaseFile::Open(const std::string& hash_so_far) {
     logging::SystemErrorCode error = logging::GetLastSystemErrorCode();
     ClearFile();
     return LogSystemError("Seeking to end", error);
-  } else if (file_size > bytes_so_far_) {
+  } else if (file_size > offset_) {
     // The file is larger than we expected.
     // This is OK, as long as we don't use the extra.
     // Truncate the file.
-    if (!file_.SetLength(bytes_so_far_) ||
-        file_.Seek(base::File::FROM_BEGIN, bytes_so_far_) != bytes_so_far_) {
+    if (!file_.SetLength(offset_) ||
+        file_.Seek(base::File::FROM_BEGIN, offset_) != offset_) {
       logging::SystemErrorCode error = logging::GetLastSystemErrorCode();
       ClearFile();
       return LogSystemError("Truncating to last known offset", error);
     }
-  } else if (file_size < bytes_so_far_) {
+  } else if (file_size < offset_) {
     // The file is shorter than we expected.  Our hashes won't be valid.
     ClearFile();
     return LogInterruptReason("Unable to seek to last written point", 0,
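
For context, a minimal caller-side sketch of the revised interface follows. It is not part of this patch: the helper name WriteSliceAt is hypothetical, the AccessMode enumerators are assumed to be exposed as BaseFile::SHARED / BaseFile::EXCLUSIVE, and the BaseFile instance and its net-log plumbing are assumed to be set up elsewhere. It has not been compiled against the tree.

// Sketch only, based on the signatures visible in this patch: write
// |data_len| bytes at |slice_offset| through a BaseFile initialized in
// SHARED mode, where Open() seeks to the offset and the prefix-hash check
// is skipped.
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "content/browser/download/base_file.h"

DownloadInterruptReason WriteSliceAt(BaseFile* base_file,
                                     const base::FilePath& path,
                                     int64_t slice_offset,
                                     const char* data,
                                     size_t data_len) {
  DownloadInterruptReason reason = base_file->Initialize(
      path, base::FilePath() /* default_directory */,
      base::File() /* file: a pre-opened handle could be passed instead */,
      slice_offset, std::string() /* hash_so_far */,
      nullptr /* hash_state */, BaseFile::SHARED);
  if (reason != DOWNLOAD_INTERRUPT_REASON_NONE)
    return reason;
  // WriteDataToFile (formerly AppendDataToFile) writes at the current file
  // position and advances both offset_ and bytes_so_far_.
  return base_file->WriteDataToFile(data, data_len);
}

In EXCLUSIVE mode the behavior matches the old bytes_so_far path (bytes_so_far_ is seeded from the offset and the partial hash is verified); in SHARED mode the prefix hash is not computed and Open() simply seeks to the supplied offset, presumably so that several writers can target different regions of the same file.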