OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // For loading files, we make use of overlapped i/o to ensure that reading from | 5 // For loading files, we make use of overlapped i/o to ensure that reading from |
6 // the filesystem (e.g., a network filesystem) does not block the calling | 6 // the filesystem (e.g., a network filesystem) does not block the calling |
7 // thread. An alternative approach would be to use a background thread or pool | 7 // thread. An alternative approach would be to use a background thread or pool |
8 // of threads, but it seems better to leverage the operating system's ability | 8 // of threads, but it seems better to leverage the operating system's ability |
9 // to do background file reads for us. | 9 // to do background file reads for us. |
10 // | 10 // |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
56 URLRequestFileJob::URLRequestFileJob( | 56 URLRequestFileJob::URLRequestFileJob( |
57 URLRequest* request, | 57 URLRequest* request, |
58 NetworkDelegate* network_delegate, | 58 NetworkDelegate* network_delegate, |
59 const base::FilePath& file_path, | 59 const base::FilePath& file_path, |
60 const scoped_refptr<base::TaskRunner>& file_task_runner) | 60 const scoped_refptr<base::TaskRunner>& file_task_runner) |
61 : URLRequestJob(request, network_delegate), | 61 : URLRequestJob(request, network_delegate), |
62 file_path_(file_path), | 62 file_path_(file_path), |
63 stream_(new FileStream(file_task_runner)), | 63 stream_(new FileStream(file_task_runner)), |
64 file_task_runner_(file_task_runner), | 64 file_task_runner_(file_task_runner), |
65 remaining_bytes_(0), | 65 remaining_bytes_(0), |
66 range_parse_result_(net::OK), | |
66 weak_ptr_factory_(this) {} | 67 weak_ptr_factory_(this) {} |
67 | 68 |
68 void URLRequestFileJob::Start() { | 69 void URLRequestFileJob::Start() { |
69 FileMetaInfo* meta_info = new FileMetaInfo(); | 70 FileMetaInfo* meta_info = new FileMetaInfo(); |
70 file_task_runner_->PostTaskAndReply( | 71 file_task_runner_->PostTaskAndReply( |
71 FROM_HERE, | 72 FROM_HERE, |
72 base::Bind(&URLRequestFileJob::FetchMetaInfo, file_path_, | 73 base::Bind(&URLRequestFileJob::FetchMetaInfo, file_path_, |
73 base::Unretained(meta_info)), | 74 base::Unretained(meta_info)), |
74 base::Bind(&URLRequestFileJob::DidFetchMetaInfo, | 75 base::Bind(&URLRequestFileJob::DidFetchMetaInfo, |
75 weak_ptr_factory_.GetWeakPtr(), | 76 weak_ptr_factory_.GetWeakPtr(), |
76 base::Owned(meta_info))); | 77 base::Owned(meta_info))); |
77 } | 78 } |
78 | 79 |
79 void URLRequestFileJob::Kill() { | 80 void URLRequestFileJob::Kill() { |
80 stream_.reset(); | 81 stream_.reset(); |
81 weak_ptr_factory_.InvalidateWeakPtrs(); | 82 weak_ptr_factory_.InvalidateWeakPtrs(); |
82 | 83 |
83 URLRequestJob::Kill(); | 84 URLRequestJob::Kill(); |
84 } | 85 } |
85 | 86 |
86 bool URLRequestFileJob::ReadRawData(IOBuffer* dest, | 87 int URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size) { |
87 int dest_size, | |
88 int* bytes_read) { | |
89 DCHECK_NE(dest_size, 0); | 88 DCHECK_NE(dest_size, 0); |
90 DCHECK(bytes_read); | |
91 DCHECK_GE(remaining_bytes_, 0); | 89 DCHECK_GE(remaining_bytes_, 0); |
92 | 90 |
93 if (remaining_bytes_ < dest_size) | 91 if (remaining_bytes_ < dest_size) |
94 dest_size = static_cast<int>(remaining_bytes_); | 92 dest_size = remaining_bytes_; |
95 | 93 |
96 // If we should copy zero bytes because |remaining_bytes_| is zero, short | 94 // If we should copy zero bytes because |remaining_bytes_| is zero, short |
97 // circuit here. | 95 // circuit here. |
98 if (!dest_size) { | 96 if (!dest_size) |
99 *bytes_read = 0; | 97 return 0; |
100 return true; | |
101 } | |
102 | 98 |
103 int rv = stream_->Read(dest, | 99 int rv = stream_->Read(dest, |
104 dest_size, | 100 dest_size, |
105 base::Bind(&URLRequestFileJob::DidRead, | 101 base::Bind(&URLRequestFileJob::DidRead, |
106 weak_ptr_factory_.GetWeakPtr(), | 102 weak_ptr_factory_.GetWeakPtr(), |
107 make_scoped_refptr(dest))); | 103 make_scoped_refptr(dest))); |
108 if (rv >= 0) { | 104 if (rv >= 0) { |
109 // Data is immediately available. | |
110 *bytes_read = rv; | |
111 remaining_bytes_ -= rv; | 105 remaining_bytes_ -= rv; |
112 DCHECK_GE(remaining_bytes_, 0); | 106 DCHECK_GE(remaining_bytes_, 0); |
113 return true; | |
114 } | 107 } |
115 | 108 |
116 // Otherwise, a read error occurred. We may just need to wait... | 109 return rv; |
117 if (rv == ERR_IO_PENDING) { | |
118 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); | |
119 } else { | |
120 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | |
121 } | |
122 return false; | |
123 } | 110 } |
124 | 111 |
125 bool URLRequestFileJob::IsRedirectResponse(GURL* location, | 112 bool URLRequestFileJob::IsRedirectResponse(GURL* location, |
126 int* http_status_code) { | 113 int* http_status_code) { |
127 if (meta_info_.is_directory) { | 114 if (meta_info_.is_directory) { |
128 // This happens when we discovered the file is a directory, so needs a | 115 // This happens when we discovered the file is a directory, so needs a |
129 // slash at the end of the path. | 116 // slash at the end of the path. |
130 std::string new_path = request_->url().path(); | 117 std::string new_path = request_->url().path(); |
131 new_path.push_back('/'); | 118 new_path.push_back('/'); |
132 GURL::Replacements replacements; | 119 GURL::Replacements replacements; |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
168 | 155 |
169 bool URLRequestFileJob::GetMimeType(std::string* mime_type) const { | 156 bool URLRequestFileJob::GetMimeType(std::string* mime_type) const { |
170 DCHECK(request_); | 157 DCHECK(request_); |
171 if (meta_info_.mime_type_result) { | 158 if (meta_info_.mime_type_result) { |
172 *mime_type = meta_info_.mime_type; | 159 *mime_type = meta_info_.mime_type; |
173 return true; | 160 return true; |
174 } | 161 } |
175 return false; | 162 return false; |
176 } | 163 } |
177 | 164 |
165 // Extracts headers that this job cares about from the supplied request headers. | |
166 // Currently this job only cares about the Range header. Note that validation is | |
167 // deferred to DidOpen, because this method may be called with a | |
168 // URLRequest::Delegate call still on the stack, which means NotifyStartError is | |
169 // not safe to call (it may reenter the delegate). | |
Randy Smith (Not in Mondays)
2015/10/22 20:38:45
This comment is useful, but there are several othe
xunjieli
2015/10/23 13:43:08
Done.
I have a question though. Since SetExtraReq
Randy Smith (Not in Mondays)
2015/10/26 21:38:03
Hmmm. Good point; I think you're right. Specific
xunjieli
2015/10/27 14:17:21
Done. I've checked with Elly too.
| |
178 void URLRequestFileJob::SetExtraRequestHeaders( | 170 void URLRequestFileJob::SetExtraRequestHeaders( |
179 const HttpRequestHeaders& headers) { | 171 const HttpRequestHeaders& headers) { |
180 std::string range_header; | 172 std::string range_header; |
181 if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) { | 173 if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) { |
182 // We only care about "Range" header here. | 174 // We only care about "Range" header here. This method stashes the value for |
175 // later use in DidOpen(), which is responsible for some of the range | |
Randy Smith (Not in Mondays)
2015/10/22 20:38:45
Should this refer to DidOpen() rather than DidRead
xunjieli
2015/10/23 13:43:08
Done.
| |
176 // validation as well. | |
183 std::vector<HttpByteRange> ranges; | 177 std::vector<HttpByteRange> ranges; |
184 if (HttpUtil::ParseRangeHeader(range_header, &ranges)) { | 178 if (HttpUtil::ParseRangeHeader(range_header, &ranges)) { |
185 if (ranges.size() == 1) { | 179 if (ranges.size() == 1) { |
186 byte_range_ = ranges[0]; | 180 byte_range_ = ranges[0]; |
187 } else { | 181 } else { |
188 // We don't support multiple range requests in one single URL request, | 182 // We don't support multiple range requests in one single URL request, |
189 // because we need to do multipart encoding here. | 183 // because we need to do multipart encoding here. |
190 // TODO(hclam): decide whether we want to support multiple range | 184 // TODO(hclam): decide whether we want to support multiple range |
191 // requests. | 185 // requests. |
192 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | 186 range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE; |
193 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | |
194 } | 187 } |
195 } | 188 } |
196 } | 189 } |
197 } | 190 } |
198 | 191 |
199 void URLRequestFileJob::OnSeekComplete(int64 result) { | 192 void URLRequestFileJob::OnSeekComplete(int64 result) { |
200 } | 193 } |
201 | 194 |
202 void URLRequestFileJob::OnReadComplete(IOBuffer* buf, int result) { | 195 void URLRequestFileJob::OnReadComplete(IOBuffer* buf, int result) { |
203 } | 196 } |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
244 base::File::FLAG_ASYNC; | 237 base::File::FLAG_ASYNC; |
245 int rv = stream_->Open(file_path_, flags, | 238 int rv = stream_->Open(file_path_, flags, |
246 base::Bind(&URLRequestFileJob::DidOpen, | 239 base::Bind(&URLRequestFileJob::DidOpen, |
247 weak_ptr_factory_.GetWeakPtr())); | 240 weak_ptr_factory_.GetWeakPtr())); |
248 if (rv != ERR_IO_PENDING) | 241 if (rv != ERR_IO_PENDING) |
249 DidOpen(rv); | 242 DidOpen(rv); |
250 } | 243 } |
251 | 244 |
252 void URLRequestFileJob::DidOpen(int result) { | 245 void URLRequestFileJob::DidOpen(int result) { |
253 if (result != OK) { | 246 if (result != OK) { |
254 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); | 247 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); |
248 return; | |
249 } | |
250 | |
251 if (range_parse_result_ != net::OK) { | |
252 NotifyStartError( | |
253 URLRequestStatus(URLRequestStatus::FAILED, range_parse_result_)); | |
255 return; | 254 return; |
256 } | 255 } |
257 | 256 |
258 if (!byte_range_.ComputeBounds(meta_info_.file_size)) { | 257 if (!byte_range_.ComputeBounds(meta_info_.file_size)) { |
259 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | 258 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, |
260 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | 259 net::ERR_REQUEST_RANGE_NOT_SATISFIABLE)); |
261 return; | 260 return; |
262 } | 261 } |
263 | 262 |
264 remaining_bytes_ = byte_range_.last_byte_position() - | 263 remaining_bytes_ = byte_range_.last_byte_position() - |
265 byte_range_.first_byte_position() + 1; | 264 byte_range_.first_byte_position() + 1; |
266 DCHECK_GE(remaining_bytes_, 0); | 265 DCHECK_GE(remaining_bytes_, 0); |
267 | 266 |
268 if (remaining_bytes_ > 0 && byte_range_.first_byte_position() != 0) { | 267 if (remaining_bytes_ > 0 && byte_range_.first_byte_position() != 0) { |
269 int rv = stream_->Seek(byte_range_.first_byte_position(), | 268 int rv = stream_->Seek(byte_range_.first_byte_position(), |
270 base::Bind(&URLRequestFileJob::DidSeek, | 269 base::Bind(&URLRequestFileJob::DidSeek, |
271 weak_ptr_factory_.GetWeakPtr())); | 270 weak_ptr_factory_.GetWeakPtr())); |
272 if (rv != ERR_IO_PENDING) { | 271 if (rv != ERR_IO_PENDING) { |
273 // stream_->Seek() failed, so pass an intentionally erroneous value | 272 // stream_->Seek() failed, so pass an intentionally erroneous value |
274 // into DidSeek(). | 273 // into DidSeek(). |
275 DidSeek(-1); | 274 DidSeek(-1); |
276 } | 275 } |
277 } else { | 276 } else { |
278 // We didn't need to call stream_->Seek() at all, so we pass to DidSeek() | 277 // We didn't need to call stream_->Seek() at all, so we pass to DidSeek() |
279 // the value that would mean seek success. This way we skip the code | 278 // the value that would mean seek success. This way we skip the code |
280 // handling seek failure. | 279 // handling seek failure. |
281 DidSeek(byte_range_.first_byte_position()); | 280 DidSeek(byte_range_.first_byte_position()); |
282 } | 281 } |
283 } | 282 } |
284 | 283 |
285 void URLRequestFileJob::DidSeek(int64 result) { | 284 void URLRequestFileJob::DidSeek(int64 result) { |
286 OnSeekComplete(result); | 285 OnSeekComplete(result); |
287 if (result != byte_range_.first_byte_position()) { | 286 if (result != byte_range_.first_byte_position()) { |
288 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | 287 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, |
289 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | 288 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); |
290 return; | 289 return; |
291 } | 290 } |
292 | 291 |
293 set_expected_content_size(remaining_bytes_); | 292 set_expected_content_size(remaining_bytes_); |
294 NotifyHeadersComplete(); | 293 NotifyHeadersComplete(); |
295 } | 294 } |
296 | 295 |
297 void URLRequestFileJob::DidRead(scoped_refptr<IOBuffer> buf, int result) { | 296 void URLRequestFileJob::DidRead(scoped_refptr<IOBuffer> buf, int result) { |
298 if (result > 0) { | 297 if (result >= 0) { |
299 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status | |
300 remaining_bytes_ -= result; | 298 remaining_bytes_ -= result; |
301 DCHECK_GE(remaining_bytes_, 0); | 299 DCHECK_GE(remaining_bytes_, 0); |
302 } | 300 } |
303 | 301 |
304 OnReadComplete(buf.get(), result); | 302 OnReadComplete(buf.get(), result); |
305 buf = NULL; | 303 buf = NULL; |
306 | 304 |
307 if (result == 0) { | 305 ReadRawDataComplete(result); |
308 NotifyDone(URLRequestStatus()); | |
309 } else if (result < 0) { | |
310 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); | |
311 } | |
312 | |
313 NotifyReadComplete(result); | |
314 } | 306 } |
315 | 307 |
316 } // namespace net | 308 } // namespace net |
OLD | NEW |