Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // For loading files, we make use of overlapped i/o to ensure that reading from | 5 // For loading files, we make use of overlapped i/o to ensure that reading from |
| 6 // the filesystem (e.g., a network filesystem) does not block the calling | 6 // the filesystem (e.g., a network filesystem) does not block the calling |
| 7 // thread. An alternative approach would be to use a background thread or pool | 7 // thread. An alternative approach would be to use a background thread or pool |
| 8 // of threads, but it seems better to leverage the operating system's ability | 8 // of threads, but it seems better to leverage the operating system's ability |
| 9 // to do background file reads for us. | 9 // to do background file reads for us. |
| 10 // | 10 // |
| (...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 56 URLRequestFileJob::URLRequestFileJob( | 56 URLRequestFileJob::URLRequestFileJob( |
| 57 URLRequest* request, | 57 URLRequest* request, |
| 58 NetworkDelegate* network_delegate, | 58 NetworkDelegate* network_delegate, |
| 59 const base::FilePath& file_path, | 59 const base::FilePath& file_path, |
| 60 const scoped_refptr<base::TaskRunner>& file_task_runner) | 60 const scoped_refptr<base::TaskRunner>& file_task_runner) |
| 61 : URLRequestJob(request, network_delegate), | 61 : URLRequestJob(request, network_delegate), |
| 62 file_path_(file_path), | 62 file_path_(file_path), |
| 63 stream_(new FileStream(file_task_runner)), | 63 stream_(new FileStream(file_task_runner)), |
| 64 file_task_runner_(file_task_runner), | 64 file_task_runner_(file_task_runner), |
| 65 remaining_bytes_(0), | 65 remaining_bytes_(0), |
| 66 range_parse_result_(net::OK), | |
|
mmenke
2015/10/27 19:41:07
net:: not needed.
xunjieli
2015/10/27 21:24:07
Done.
| |
| 66 weak_ptr_factory_(this) {} | 67 weak_ptr_factory_(this) {} |
| 67 | 68 |
| 68 void URLRequestFileJob::Start() { | 69 void URLRequestFileJob::Start() { |
| 69 FileMetaInfo* meta_info = new FileMetaInfo(); | 70 FileMetaInfo* meta_info = new FileMetaInfo(); |
| 70 file_task_runner_->PostTaskAndReply( | 71 file_task_runner_->PostTaskAndReply( |
| 71 FROM_HERE, | 72 FROM_HERE, |
| 72 base::Bind(&URLRequestFileJob::FetchMetaInfo, file_path_, | 73 base::Bind(&URLRequestFileJob::FetchMetaInfo, file_path_, |
| 73 base::Unretained(meta_info)), | 74 base::Unretained(meta_info)), |
| 74 base::Bind(&URLRequestFileJob::DidFetchMetaInfo, | 75 base::Bind(&URLRequestFileJob::DidFetchMetaInfo, |
| 75 weak_ptr_factory_.GetWeakPtr(), | 76 weak_ptr_factory_.GetWeakPtr(), |
| 76 base::Owned(meta_info))); | 77 base::Owned(meta_info))); |
| 77 } | 78 } |
| 78 | 79 |
| 79 void URLRequestFileJob::Kill() { | 80 void URLRequestFileJob::Kill() { |
| 80 stream_.reset(); | 81 stream_.reset(); |
| 81 weak_ptr_factory_.InvalidateWeakPtrs(); | 82 weak_ptr_factory_.InvalidateWeakPtrs(); |
| 82 | 83 |
| 83 URLRequestJob::Kill(); | 84 URLRequestJob::Kill(); |
| 84 } | 85 } |
| 85 | 86 |
| 86 bool URLRequestFileJob::ReadRawData(IOBuffer* dest, | 87 int URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size) { |
| 87 int dest_size, | |
| 88 int* bytes_read) { | |
| 89 DCHECK_NE(dest_size, 0); | 88 DCHECK_NE(dest_size, 0); |
| 90 DCHECK(bytes_read); | |
| 91 DCHECK_GE(remaining_bytes_, 0); | 89 DCHECK_GE(remaining_bytes_, 0); |
| 92 | 90 |
| 93 if (remaining_bytes_ < dest_size) | 91 if (remaining_bytes_ < dest_size) |
| 94 dest_size = static_cast<int>(remaining_bytes_); | 92 dest_size = remaining_bytes_; |
| 95 | 93 |
| 96 // If we should copy zero bytes because |remaining_bytes_| is zero, short | 94 // If we should copy zero bytes because |remaining_bytes_| is zero, short |
| 97 // circuit here. | 95 // circuit here. |
| 98 if (!dest_size) { | 96 if (!dest_size) |
| 99 *bytes_read = 0; | 97 return 0; |
| 100 return true; | |
| 101 } | |
| 102 | 98 |
| 103 int rv = stream_->Read(dest, | 99 int rv = stream_->Read(dest, |
| 104 dest_size, | 100 dest_size, |
| 105 base::Bind(&URLRequestFileJob::DidRead, | 101 base::Bind(&URLRequestFileJob::DidRead, |
| 106 weak_ptr_factory_.GetWeakPtr(), | 102 weak_ptr_factory_.GetWeakPtr(), |
| 107 make_scoped_refptr(dest))); | 103 make_scoped_refptr(dest))); |
| 108 if (rv >= 0) { | 104 if (rv >= 0) { |
| 109 // Data is immediately available. | |
| 110 *bytes_read = rv; | |
| 111 remaining_bytes_ -= rv; | 105 remaining_bytes_ -= rv; |
| 112 DCHECK_GE(remaining_bytes_, 0); | 106 DCHECK_GE(remaining_bytes_, 0); |
| 113 return true; | |
| 114 } | 107 } |
| 115 | 108 |
| 116 // Otherwise, a read error occurred. We may just need to wait... | 109 return rv; |
| 117 if (rv == ERR_IO_PENDING) { | |
| 118 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); | |
| 119 } else { | |
| 120 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | |
| 121 } | |
| 122 return false; | |
| 123 } | 110 } |
| 124 | 111 |
| 125 bool URLRequestFileJob::IsRedirectResponse(GURL* location, | 112 bool URLRequestFileJob::IsRedirectResponse(GURL* location, |
| 126 int* http_status_code) { | 113 int* http_status_code) { |
| 127 if (meta_info_.is_directory) { | 114 if (meta_info_.is_directory) { |
| 128 // This happens when we discovered the file is a directory, so needs a | 115 // This happens when we discovered the file is a directory, so needs a |
| 129 // slash at the end of the path. | 116 // slash at the end of the path. |
| 130 std::string new_path = request_->url().path(); | 117 std::string new_path = request_->url().path(); |
| 131 new_path.push_back('/'); | 118 new_path.push_back('/'); |
| 132 GURL::Replacements replacements; | 119 GURL::Replacements replacements; |
| (...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 168 | 155 |
| 169 bool URLRequestFileJob::GetMimeType(std::string* mime_type) const { | 156 bool URLRequestFileJob::GetMimeType(std::string* mime_type) const { |
| 170 DCHECK(request_); | 157 DCHECK(request_); |
| 171 if (meta_info_.mime_type_result) { | 158 if (meta_info_.mime_type_result) { |
| 172 *mime_type = meta_info_.mime_type; | 159 *mime_type = meta_info_.mime_type; |
| 173 return true; | 160 return true; |
| 174 } | 161 } |
| 175 return false; | 162 return false; |
| 176 } | 163 } |
| 177 | 164 |
| 165 // Extracts headers that this job cares about from the supplied request headers. | |
| 166 // Currently this job only cares about the Range header. Note that validation is | |
| 167 // deferred to DidOpen, because NotifyStartError is not legal to call since | |
| 168 // the job has not started. | |
|
mmenke
2015/10/27 19:41:08
Method level documentation about what a method does [comment truncated in extraction]
xunjieli
2015/10/27 21:24:07
Done.
| |
| 178 void URLRequestFileJob::SetExtraRequestHeaders( | 169 void URLRequestFileJob::SetExtraRequestHeaders( |
| 179 const HttpRequestHeaders& headers) { | 170 const HttpRequestHeaders& headers) { |
| 180 std::string range_header; | 171 std::string range_header; |
| 181 if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) { | 172 if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) { |
| 182 // We only care about "Range" header here. | 173 // We only care about "Range" header here. This method stashes the value for |
| 174 // later use in DidOpen(), which is responsible for some of the range | |
| 175 // validation as well. | |
| 183 std::vector<HttpByteRange> ranges; | 176 std::vector<HttpByteRange> ranges; |
| 184 if (HttpUtil::ParseRangeHeader(range_header, &ranges)) { | 177 if (HttpUtil::ParseRangeHeader(range_header, &ranges)) { |
| 185 if (ranges.size() == 1) { | 178 if (ranges.size() == 1) { |
| 186 byte_range_ = ranges[0]; | 179 byte_range_ = ranges[0]; |
| 187 } else { | 180 } else { |
| 188 // We don't support multiple range requests in one single URL request, | 181 // We don't support multiple range requests in one single URL request, |
| 189 // because we need to do multipart encoding here. | 182 // because we need to do multipart encoding here. |
| 190 // TODO(hclam): decide whether we want to support multiple range | 183 // TODO(hclam): decide whether we want to support multiple range |
| 191 // requests. | 184 // requests. |
| 192 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | 185 range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE; |
| 193 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | |
| 194 } | 186 } |
| 195 } | 187 } |
| 196 } | 188 } |
| 197 } | 189 } |
| 198 | 190 |
| 199 void URLRequestFileJob::OnSeekComplete(int64 result) { | 191 void URLRequestFileJob::OnSeekComplete(int64 result) { |
| 200 } | 192 } |
| 201 | 193 |
| 202 void URLRequestFileJob::OnReadComplete(IOBuffer* buf, int result) { | 194 void URLRequestFileJob::OnReadComplete(IOBuffer* buf, int result) { |
| 203 } | 195 } |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 244 base::File::FLAG_ASYNC; | 236 base::File::FLAG_ASYNC; |
| 245 int rv = stream_->Open(file_path_, flags, | 237 int rv = stream_->Open(file_path_, flags, |
| 246 base::Bind(&URLRequestFileJob::DidOpen, | 238 base::Bind(&URLRequestFileJob::DidOpen, |
| 247 weak_ptr_factory_.GetWeakPtr())); | 239 weak_ptr_factory_.GetWeakPtr())); |
| 248 if (rv != ERR_IO_PENDING) | 240 if (rv != ERR_IO_PENDING) |
| 249 DidOpen(rv); | 241 DidOpen(rv); |
| 250 } | 242 } |
| 251 | 243 |
| 252 void URLRequestFileJob::DidOpen(int result) { | 244 void URLRequestFileJob::DidOpen(int result) { |
| 253 if (result != OK) { | 245 if (result != OK) { |
| 254 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); | 246 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); |
| 247 return; | |
| 248 } | |
| 249 | |
| 250 if (range_parse_result_ != net::OK) { | |
| 251 NotifyStartError( | |
| 252 URLRequestStatus(URLRequestStatus::FAILED, range_parse_result_)); | |
| 255 return; | 253 return; |
| 256 } | 254 } |
| 257 | 255 |
| 258 if (!byte_range_.ComputeBounds(meta_info_.file_size)) { | 256 if (!byte_range_.ComputeBounds(meta_info_.file_size)) { |
| 259 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | 257 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, |
| 260 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | 258 net::ERR_REQUEST_RANGE_NOT_SATISFIABLE)); |
| 261 return; | 259 return; |
| 262 } | 260 } |
| 263 | 261 |
| 264 remaining_bytes_ = byte_range_.last_byte_position() - | 262 remaining_bytes_ = byte_range_.last_byte_position() - |
| 265 byte_range_.first_byte_position() + 1; | 263 byte_range_.first_byte_position() + 1; |
| 266 DCHECK_GE(remaining_bytes_, 0); | 264 DCHECK_GE(remaining_bytes_, 0); |
| 267 | 265 |
| 268 if (remaining_bytes_ > 0 && byte_range_.first_byte_position() != 0) { | 266 if (remaining_bytes_ > 0 && byte_range_.first_byte_position() != 0) { |
| 269 int rv = stream_->Seek(byte_range_.first_byte_position(), | 267 int rv = stream_->Seek(byte_range_.first_byte_position(), |
| 270 base::Bind(&URLRequestFileJob::DidSeek, | 268 base::Bind(&URLRequestFileJob::DidSeek, |
| 271 weak_ptr_factory_.GetWeakPtr())); | 269 weak_ptr_factory_.GetWeakPtr())); |
| 272 if (rv != ERR_IO_PENDING) { | 270 if (rv != ERR_IO_PENDING) { |
| 273 // stream_->Seek() failed, so pass an intentionally erroneous value | 271 // stream_->Seek() failed, so pass an intentionally erroneous value |
| 274 // into DidSeek(). | 272 // into DidSeek(). |
| 275 DidSeek(-1); | 273 DidSeek(-1); |
| 276 } | 274 } |
| 277 } else { | 275 } else { |
| 278 // We didn't need to call stream_->Seek() at all, so we pass to DidSeek() | 276 // We didn't need to call stream_->Seek() at all, so we pass to DidSeek() |
| 279 // the value that would mean seek success. This way we skip the code | 277 // the value that would mean seek success. This way we skip the code |
| 280 // handling seek failure. | 278 // handling seek failure. |
| 281 DidSeek(byte_range_.first_byte_position()); | 279 DidSeek(byte_range_.first_byte_position()); |
| 282 } | 280 } |
| 283 } | 281 } |
| 284 | 282 |
| 285 void URLRequestFileJob::DidSeek(int64 result) { | 283 void URLRequestFileJob::DidSeek(int64 result) { |
| 286 OnSeekComplete(result); | 284 OnSeekComplete(result); |
| 287 if (result != byte_range_.first_byte_position()) { | 285 if (result != byte_range_.first_byte_position()) { |
| 288 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | 286 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, |
| 289 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | 287 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); |
| 290 return; | 288 return; |
| 291 } | 289 } |
| 292 | 290 |
| 293 set_expected_content_size(remaining_bytes_); | 291 set_expected_content_size(remaining_bytes_); |
| 294 NotifyHeadersComplete(); | 292 NotifyHeadersComplete(); |
| 295 } | 293 } |
| 296 | 294 |
| 297 void URLRequestFileJob::DidRead(scoped_refptr<IOBuffer> buf, int result) { | 295 void URLRequestFileJob::DidRead(scoped_refptr<IOBuffer> buf, int result) { |
| 298 if (result > 0) { | 296 if (result >= 0) { |
| 299 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status | |
| 300 remaining_bytes_ -= result; | 297 remaining_bytes_ -= result; |
| 301 DCHECK_GE(remaining_bytes_, 0); | 298 DCHECK_GE(remaining_bytes_, 0); |
| 302 } | 299 } |
| 303 | 300 |
| 304 OnReadComplete(buf.get(), result); | 301 OnReadComplete(buf.get(), result); |
| 305 buf = NULL; | 302 buf = NULL; |
| 306 | 303 |
| 307 if (result == 0) { | 304 ReadRawDataComplete(result); |
| 308 NotifyDone(URLRequestStatus()); | |
| 309 } else if (result < 0) { | |
| 310 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); | |
| 311 } | |
| 312 | |
| 313 NotifyReadComplete(result); | |
| 314 } | 305 } |
| 315 | 306 |
| 316 } // namespace net | 307 } // namespace net |
| OLD | NEW |