OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // For loading files, we make use of overlapped i/o to ensure that reading from | 5 // For loading files, we make use of overlapped i/o to ensure that reading from |
6 // the filesystem (e.g., a network filesystem) does not block the calling | 6 // the filesystem (e.g., a network filesystem) does not block the calling |
7 // thread. An alternative approach would be to use a background thread or pool | 7 // thread. An alternative approach would be to use a background thread or pool |
8 // of threads, but it seems better to leverage the operating system's ability | 8 // of threads, but it seems better to leverage the operating system's ability |
9 // to do background file reads for us. | 9 // to do background file reads for us. |
10 // | 10 // |
(...skipping 15 matching lines...)
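As a reading aid for the overlapped-I/O design described in the header comment above: the FileStream calls in this file (Open, Read, Seek) all follow the same completion convention, where a non-negative return value means the operation finished synchronously, a dedicated "pending" error code means the supplied callback will be invoked later, and any other negative value is an immediate failure. The sketch below is a hypothetical, self-contained illustration of that convention; AsyncFileReader and kErrIoPending are made-up names, not part of net/.

#include <functional>
#include <iostream>
#include <utility>

namespace sketch {

// Stand-in for net::ERR_IO_PENDING: "the result arrives via the callback".
const int kErrIoPending = -1;

// Hypothetical reader that mimics the overlapped-read convention.
struct AsyncFileReader {
  // Returns bytes read (>= 0) if the read completed synchronously,
  // kErrIoPending if |callback| will be invoked later with the result,
  // or another negative value on immediate failure.
  int Read(char* /*dest*/, int dest_size, std::function<void(int)> callback) {
    if (dest_size <= 0)
      return 0;  // Nothing to read; complete synchronously.
    // Pretend the data is not cached, so the read completes asynchronously.
    pending_callback_ = std::move(callback);
    pending_size_ = dest_size;
    return kErrIoPending;
  }

  // Simulates the OS signalling that the overlapped read finished.
  void CompletePendingRead() {
    if (pending_callback_)
      pending_callback_(pending_size_);
  }

  std::function<void(int)> pending_callback_;
  int pending_size_ = 0;
};

}  // namespace sketch

int main() {
  sketch::AsyncFileReader reader;
  char buf[64];
  int rv = reader.Read(buf, sizeof(buf), [](int result) {
    std::cout << "completed asynchronously: " << result << " bytes\n";
  });
  if (rv >= 0)
    std::cout << "completed synchronously: " << rv << " bytes\n";
  else if (rv == sketch::kErrIoPending)
    reader.CompletePendingRead();  // In the real job, the callback fires later.
  else
    std::cout << "failed immediately: " << rv << "\n";
  return 0;
}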
26 #include "base/strings/string_util.h" | 26 #include "base/strings/string_util.h" |
27 #include "base/synchronization/lock.h" | 27 #include "base/synchronization/lock.h" |
28 #include "base/task_runner.h" | 28 #include "base/task_runner.h" |
29 #include "base/threading/thread_restrictions.h" | 29 #include "base/threading/thread_restrictions.h" |
30 #include "build/build_config.h" | 30 #include "build/build_config.h" |
31 #include "net/base/file_stream.h" | 31 #include "net/base/file_stream.h" |
32 #include "net/base/filename_util.h" | 32 #include "net/base/filename_util.h" |
33 #include "net/base/io_buffer.h" | 33 #include "net/base/io_buffer.h" |
34 #include "net/base/load_flags.h" | 34 #include "net/base/load_flags.h" |
35 #include "net/base/mime_util.h" | 35 #include "net/base/mime_util.h" |
| 36 #include "net/base/net_errors.h" |
36 #include "net/filter/filter.h" | 37 #include "net/filter/filter.h" |
37 #include "net/http/http_util.h" | 38 #include "net/http/http_util.h" |
38 #include "net/url_request/url_request_error_job.h" | 39 #include "net/url_request/url_request_error_job.h" |
39 #include "net/url_request/url_request_file_dir_job.h" | 40 #include "net/url_request/url_request_file_dir_job.h" |
40 #include "url/gurl.h" | 41 #include "url/gurl.h" |
41 | 42 |
42 #if defined(OS_WIN) | 43 #if defined(OS_WIN) |
43 #include "base/win/shortcut.h" | 44 #include "base/win/shortcut.h" |
44 #endif | 45 #endif |
45 | 46 |
46 namespace net { | 47 namespace net { |
47 | 48 |
48 URLRequestFileJob::FileMetaInfo::FileMetaInfo() | 49 URLRequestFileJob::FileMetaInfo::FileMetaInfo() |
49 : file_size(0), | 50 : file_size(0), |
50 mime_type_result(false), | 51 mime_type_result(false), |
51 file_exists(false), | 52 file_exists(false), |
52 is_directory(false) { | 53 is_directory(false) { |
53 } | 54 } |
54 | 55 |
55 URLRequestFileJob::URLRequestFileJob( | 56 URLRequestFileJob::URLRequestFileJob( |
56 URLRequest* request, | 57 URLRequest* request, |
57 NetworkDelegate* network_delegate, | 58 NetworkDelegate* network_delegate, |
58 const base::FilePath& file_path, | 59 const base::FilePath& file_path, |
59 const scoped_refptr<base::TaskRunner>& file_task_runner) | 60 const scoped_refptr<base::TaskRunner>& file_task_runner) |
60 : URLRequestJob(request, network_delegate), | 61 : URLRequestJob(request, network_delegate), |
61 file_path_(file_path), | 62 file_path_(file_path), |
62 stream_(new FileStream(file_task_runner)), | 63 stream_(new FileStream(file_task_runner)), |
63 file_task_runner_(file_task_runner), | 64 file_task_runner_(file_task_runner), |
64 remaining_bytes_(0), | 65 remaining_bytes_(0), |
65 range_parse_result_(OK), | |
66 weak_ptr_factory_(this) {} | 66 weak_ptr_factory_(this) {} |
67 | 67 |
68 void URLRequestFileJob::Start() { | 68 void URLRequestFileJob::Start() { |
69 FileMetaInfo* meta_info = new FileMetaInfo(); | 69 FileMetaInfo* meta_info = new FileMetaInfo(); |
70 file_task_runner_->PostTaskAndReply( | 70 file_task_runner_->PostTaskAndReply( |
71 FROM_HERE, | 71 FROM_HERE, |
72 base::Bind(&URLRequestFileJob::FetchMetaInfo, file_path_, | 72 base::Bind(&URLRequestFileJob::FetchMetaInfo, file_path_, |
73 base::Unretained(meta_info)), | 73 base::Unretained(meta_info)), |
74 base::Bind(&URLRequestFileJob::DidFetchMetaInfo, | 74 base::Bind(&URLRequestFileJob::DidFetchMetaInfo, |
75 weak_ptr_factory_.GetWeakPtr(), | 75 weak_ptr_factory_.GetWeakPtr(), |
76 base::Owned(meta_info))); | 76 base::Owned(meta_info))); |
77 } | 77 } |
78 | 78 |
79 void URLRequestFileJob::Kill() { | 79 void URLRequestFileJob::Kill() { |
80 stream_.reset(); | 80 stream_.reset(); |
81 weak_ptr_factory_.InvalidateWeakPtrs(); | 81 weak_ptr_factory_.InvalidateWeakPtrs(); |
82 | 82 |
83 URLRequestJob::Kill(); | 83 URLRequestJob::Kill(); |
84 } | 84 } |
85 | 85 |
86 int URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size) { | 86 bool URLRequestFileJob::ReadRawData(IOBuffer* dest, |
| 87 int dest_size, |
| 88 int* bytes_read) { |
87 DCHECK_NE(dest_size, 0); | 89 DCHECK_NE(dest_size, 0); |
| 90 DCHECK(bytes_read); |
88 DCHECK_GE(remaining_bytes_, 0); | 91 DCHECK_GE(remaining_bytes_, 0); |
89 | 92 |
90 if (remaining_bytes_ < dest_size) | 93 if (remaining_bytes_ < dest_size) |
91 dest_size = remaining_bytes_; | 94 dest_size = static_cast<int>(remaining_bytes_); |
92 | 95 |
93 // If we should copy zero bytes because |remaining_bytes_| is zero, short | 96 // If we should copy zero bytes because |remaining_bytes_| is zero, short |
94 // circuit here. | 97 // circuit here. |
95 if (!dest_size) | 98 if (!dest_size) { |
96 return 0; | 99 *bytes_read = 0; |
| 100 return true; |
| 101 } |
97 | 102 |
98 int rv = stream_->Read(dest, | 103 int rv = stream_->Read(dest, |
99 dest_size, | 104 dest_size, |
100 base::Bind(&URLRequestFileJob::DidRead, | 105 base::Bind(&URLRequestFileJob::DidRead, |
101 weak_ptr_factory_.GetWeakPtr(), | 106 weak_ptr_factory_.GetWeakPtr(), |
102 make_scoped_refptr(dest))); | 107 make_scoped_refptr(dest))); |
103 if (rv >= 0) { | 108 if (rv >= 0) { |
| 109 // Data is immediately available. |
| 110 *bytes_read = rv; |
104 remaining_bytes_ -= rv; | 111 remaining_bytes_ -= rv; |
105 DCHECK_GE(remaining_bytes_, 0); | 112 DCHECK_GE(remaining_bytes_, 0); |
| 113 return true; |
106 } | 114 } |
107 | 115 |
108 return rv; | 116 // Otherwise, a read error occurred. We may just need to wait... |
| 117 if (rv == ERR_IO_PENDING) { |
| 118 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
| 119 } else { |
| 120 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); |
| 121 } |
| 122 return false; |
109 } | 123 } |
110 | 124 |
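The right-hand side restores the older ReadRawData() contract: returning true means *bytes_read is valid immediately (with 0 signalling end of file), while returning false means the job either set an IO_PENDING status (the data will arrive via NotifyReadComplete()) or already reported a failure via NotifyDone(). A hypothetical caller-side sketch of that three-way outcome, using a made-up FakeFileJob rather than the real URLRequestJob API:

#include <iostream>

namespace sketch {

// Made-up stand-in for a job using the bool/out-parameter read contract.
struct FakeFileJob {
  bool simulate_synchronous = true;

  // true  -> *bytes_read holds the result now (0 means end of file).
  // false -> the read is pending, or the job already reported an error.
  bool ReadRawData(char* /*dest*/, int dest_size, int* bytes_read) {
    if (simulate_synchronous) {
      *bytes_read = dest_size;  // Pretend the whole buffer was filled.
      return true;
    }
    return false;  // Pending or failed; the job's status says which.
  }
};

}  // namespace sketch

int main() {
  sketch::FakeFileJob job;
  char buf[16];
  int bytes_read = 0;
  if (job.ReadRawData(buf, sizeof(buf), &bytes_read))
    std::cout << "synchronous read, bytes_read=" << bytes_read << "\n";
  else
    std::cout << "pending or failed; wait for the job's notification\n";
  return 0;
}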
111 bool URLRequestFileJob::IsRedirectResponse(GURL* location, | 125 bool URLRequestFileJob::IsRedirectResponse(GURL* location, |
112 int* http_status_code) { | 126 int* http_status_code) { |
113 if (meta_info_.is_directory) { | 127 if (meta_info_.is_directory) { |
114 // This happens when we discovered the file is a directory, so it needs a | 128 // This happens when we discovered the file is a directory, so it needs a |
115 // slash at the end of the path. | 129 // slash at the end of the path. |
116 std::string new_path = request_->url().path(); | 130 std::string new_path = request_->url().path(); |
117 new_path.push_back('/'); | 131 new_path.push_back('/'); |
118 GURL::Replacements replacements; | 132 GURL::Replacements replacements; |
(...skipping 39 matching lines...)
158 *mime_type = meta_info_.mime_type; | 172 *mime_type = meta_info_.mime_type; |
159 return true; | 173 return true; |
160 } | 174 } |
161 return false; | 175 return false; |
162 } | 176 } |
163 | 177 |
164 void URLRequestFileJob::SetExtraRequestHeaders( | 178 void URLRequestFileJob::SetExtraRequestHeaders( |
165 const HttpRequestHeaders& headers) { | 179 const HttpRequestHeaders& headers) { |
166 std::string range_header; | 180 std::string range_header; |
167 if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) { | 181 if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) { |
168 // This job only cares about the Range header. This method stashes the value | 182 // We only care about "Range" header here. |
169 // for later use in DidOpen(), which is responsible for some of the range | |
170 // validation as well. NotifyStartError is not legal to call here since | |
171 // the job has not started. | |
172 std::vector<HttpByteRange> ranges; | 183 std::vector<HttpByteRange> ranges; |
173 if (HttpUtil::ParseRangeHeader(range_header, &ranges)) { | 184 if (HttpUtil::ParseRangeHeader(range_header, &ranges)) { |
174 if (ranges.size() == 1) { | 185 if (ranges.size() == 1) { |
175 byte_range_ = ranges[0]; | 186 byte_range_ = ranges[0]; |
176 } else { | 187 } else { |
177 // We don't support multiple range requests in one single URL request, | 188 // We don't support multiple range requests in one single URL request, |
178 // because we need to do multipart encoding here. | 189 // because we need to do multipart encoding here. |
179 // TODO(hclam): decide whether we want to support multiple range | 190 // TODO(hclam): decide whether we want to support multiple range |
180 // requests. | 191 // requests. |
181 range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE; | 192 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, |
| 193 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); |
182 } | 194 } |
183 } | 195 } |
184 } | 196 } |
185 } | 197 } |
186 | 198 |
187 void URLRequestFileJob::OnSeekComplete(int64 result) { | 199 void URLRequestFileJob::OnSeekComplete(int64 result) { |
188 } | 200 } |
189 | 201 |
190 void URLRequestFileJob::OnReadComplete(IOBuffer* buf, int result) { | 202 void URLRequestFileJob::OnReadComplete(IOBuffer* buf, int result) { |
191 } | 203 } |
(...skipping 40 matching lines...)
232 base::File::FLAG_ASYNC; | 244 base::File::FLAG_ASYNC; |
233 int rv = stream_->Open(file_path_, flags, | 245 int rv = stream_->Open(file_path_, flags, |
234 base::Bind(&URLRequestFileJob::DidOpen, | 246 base::Bind(&URLRequestFileJob::DidOpen, |
235 weak_ptr_factory_.GetWeakPtr())); | 247 weak_ptr_factory_.GetWeakPtr())); |
236 if (rv != ERR_IO_PENDING) | 248 if (rv != ERR_IO_PENDING) |
237 DidOpen(rv); | 249 DidOpen(rv); |
238 } | 250 } |
239 | 251 |
240 void URLRequestFileJob::DidOpen(int result) { | 252 void URLRequestFileJob::DidOpen(int result) { |
241 if (result != OK) { | 253 if (result != OK) { |
242 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); | 254 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); |
243 return; | |
244 } | |
245 | |
246 if (range_parse_result_ != net::OK) { | |
247 NotifyStartError( | |
248 URLRequestStatus(URLRequestStatus::FAILED, range_parse_result_)); | |
249 return; | 255 return; |
250 } | 256 } |
251 | 257 |
252 if (!byte_range_.ComputeBounds(meta_info_.file_size)) { | 258 if (!byte_range_.ComputeBounds(meta_info_.file_size)) { |
253 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, | 259 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, |
254 net::ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | 260 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); |
255 return; | 261 return; |
256 } | 262 } |
257 | 263 |
258 remaining_bytes_ = byte_range_.last_byte_position() - | 264 remaining_bytes_ = byte_range_.last_byte_position() - |
259 byte_range_.first_byte_position() + 1; | 265 byte_range_.first_byte_position() + 1; |
260 DCHECK_GE(remaining_bytes_, 0); | 266 DCHECK_GE(remaining_bytes_, 0); |
261 | 267 |
262 if (remaining_bytes_ > 0 && byte_range_.first_byte_position() != 0) { | 268 if (remaining_bytes_ > 0 && byte_range_.first_byte_position() != 0) { |
263 int rv = stream_->Seek(byte_range_.first_byte_position(), | 269 int rv = stream_->Seek(byte_range_.first_byte_position(), |
264 base::Bind(&URLRequestFileJob::DidSeek, | 270 base::Bind(&URLRequestFileJob::DidSeek, |
265 weak_ptr_factory_.GetWeakPtr())); | 271 weak_ptr_factory_.GetWeakPtr())); |
266 if (rv != ERR_IO_PENDING) { | 272 if (rv != ERR_IO_PENDING) { |
267 // stream_->Seek() failed, so pass an intentionally erroneous value | 273 // stream_->Seek() failed, so pass an intentionally erroneous value |
268 // into DidSeek(). | 274 // into DidSeek(). |
269 DidSeek(-1); | 275 DidSeek(-1); |
270 } | 276 } |
271 } else { | 277 } else { |
272 // We didn't need to call stream_->Seek() at all, so we pass to DidSeek() | 278 // We didn't need to call stream_->Seek() at all, so we pass to DidSeek() |
273 // the value that would mean seek success. This way we skip the code | 279 // the value that would mean seek success. This way we skip the code |
274 // handling seek failure. | 280 // handling seek failure. |
275 DidSeek(byte_range_.first_byte_position()); | 281 DidSeek(byte_range_.first_byte_position()); |
276 } | 282 } |
277 } | 283 } |
278 | 284 |
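The bounds arithmetic in DidOpen() above (ComputeBounds() clamping the range to the file size, then remaining_bytes_ = last - first + 1) can be illustrated with plain integers. The sketch below is a simplified, standalone illustration; ByteRange here is a made-up type, not net::HttpByteRange, and it only handles "bytes=first-" and "bytes=first-last" style ranges.

#include <cstdint>
#include <iostream>

namespace sketch {

// Made-up, simplified range type; the real net::HttpByteRange also handles
// suffix ranges and unset positions.
struct ByteRange {
  int64_t first = 0;
  int64_t last = -1;  // -1 means "until end of file", as in "bytes=500-".

  // Returns false when the range cannot be satisfied for a |size|-byte file,
  // mirroring the ERR_REQUEST_RANGE_NOT_SATISFIABLE case above.
  bool ComputeBounds(int64_t size) {
    if (last < 0 || last >= size)
      last = size - 1;  // Clamp open-ended or oversized ranges to the file.
    return first <= last && first < size;
  }
};

}  // namespace sketch

int main() {
  sketch::ByteRange range;
  range.first = 500;  // e.g. a "Range: bytes=500-" request header.
  const int64_t kFileSize = 1000;
  if (range.ComputeBounds(kFileSize)) {
    // Same arithmetic as DidOpen(): 999 - 500 + 1 == 500 bytes to serve.
    int64_t remaining = range.last - range.first + 1;
    std::cout << "serve " << remaining << " bytes starting at offset "
              << range.first << "\n";
  }
  return 0;
}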
279 void URLRequestFileJob::DidSeek(int64 result) { | 285 void URLRequestFileJob::DidSeek(int64 result) { |
280 OnSeekComplete(result); | 286 OnSeekComplete(result); |
281 if (result != byte_range_.first_byte_position()) { | 287 if (result != byte_range_.first_byte_position()) { |
282 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, | 288 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, |
283 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | 289 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); |
284 return; | 290 return; |
285 } | 291 } |
286 | 292 |
287 set_expected_content_size(remaining_bytes_); | 293 set_expected_content_size(remaining_bytes_); |
288 NotifyHeadersComplete(); | 294 NotifyHeadersComplete(); |
289 } | 295 } |
290 | 296 |
291 void URLRequestFileJob::DidRead(scoped_refptr<IOBuffer> buf, int result) { | 297 void URLRequestFileJob::DidRead(scoped_refptr<IOBuffer> buf, int result) { |
292 if (result >= 0) { | 298 if (result > 0) { |
| 299 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status |
293 remaining_bytes_ -= result; | 300 remaining_bytes_ -= result; |
294 DCHECK_GE(remaining_bytes_, 0); | 301 DCHECK_GE(remaining_bytes_, 0); |
295 } | 302 } |
296 | 303 |
297 OnReadComplete(buf.get(), result); | 304 OnReadComplete(buf.get(), result); |
298 buf = NULL; | 305 buf = NULL; |
299 | 306 |
300 ReadRawDataComplete(result); | 307 if (result == 0) { |
| 308 NotifyDone(URLRequestStatus()); |
| 309 } else if (result < 0) { |
| 310 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); |
| 311 } |
| 312 |
| 313 NotifyReadComplete(result); |
301 } | 314 } |
302 | 315 |
303 } // namespace net | 316 } // namespace net |