OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // For loading files, we make use of overlapped i/o to ensure that reading from | 5 // For loading files, we make use of overlapped i/o to ensure that reading from |
6 // the filesystem (e.g., a network filesystem) does not block the calling | 6 // the filesystem (e.g., a network filesystem) does not block the calling |
7 // thread. An alternative approach would be to use a background thread or pool | 7 // thread. An alternative approach would be to use a background thread or pool |
8 // of threads, but it seems better to leverage the operating system's ability | 8 // of threads, but it seems better to leverage the operating system's ability |
9 // to do background file reads for us. | 9 // to do background file reads for us. |
10 // | 10 // |
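The "overlapped i/o" mentioned above is realized in this file by opening the FileStream with base::PLATFORM_FILE_ASYNC and driving every operation through a completion callback. A minimal sketch of that contract, using only calls that already appear in this change (|path| and |callback| are placeholder names):

    scoped_ptr<FileStream> stream(new FileStream(NULL));  // NULL: no NetLog
    int flags = base::PLATFORM_FILE_OPEN |
                base::PLATFORM_FILE_READ |
                base::PLATFORM_FILE_ASYNC;
    // Open() and Read() either finish synchronously (rv != ERR_IO_PENDING)
    // or return ERR_IO_PENDING and run |callback| later on this thread.
    int rv = stream->Open(path, flags, callback);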
(...skipping 24 matching lines...) | |
35 #include "net/base/net_errors.h" | 35 #include "net/base/net_errors.h" |
36 #include "net/base/net_util.h" | 36 #include "net/base/net_util.h" |
37 #include "net/http/http_util.h" | 37 #include "net/http/http_util.h" |
38 #include "net/url_request/url_request.h" | 38 #include "net/url_request/url_request.h" |
39 #include "net/url_request/url_request_context.h" | 39 #include "net/url_request/url_request_context.h" |
40 #include "net/url_request/url_request_error_job.h" | 40 #include "net/url_request/url_request_error_job.h" |
41 #include "net/url_request/url_request_file_dir_job.h" | 41 #include "net/url_request/url_request_file_dir_job.h" |
42 | 42 |
43 namespace net { | 43 namespace net { |
44 | 44 |
45 class URLRequestFileJob::AsyncResolver | 45 URLRequestFileJob::FileMetaInfo::FileMetaInfo() |
46 : public base::RefCountedThreadSafe<URLRequestFileJob::AsyncResolver> { | 46 : file_size(-1), |
47 public: | 47 mime_type_result(false), |
48 explicit AsyncResolver(URLRequestFileJob* owner) | 48 file_exists(false), |
49 : owner_(owner), owner_loop_(MessageLoop::current()) { | 49 is_directory(false) { |
50 } | 50 } |
51 | |
52 void Resolve(const FilePath& file_path) { | |
53 base::PlatformFileInfo file_info; | |
54 bool exists = file_util::GetFileInfo(file_path, &file_info); | |
55 base::AutoLock locked(lock_); | |
56 if (owner_loop_) { | |
57 owner_loop_->PostTask( | |
58 FROM_HERE, | |
59 base::Bind(&AsyncResolver::ReturnResults, this, exists, file_info)); | |
60 } | |
61 } | |
62 | |
63 void Cancel() { | |
64 owner_ = NULL; | |
65 | |
66 base::AutoLock locked(lock_); | |
67 owner_loop_ = NULL; | |
68 } | |
69 | |
70 private: | |
71 friend class base::RefCountedThreadSafe<URLRequestFileJob::AsyncResolver>; | |
72 | |
73 ~AsyncResolver() {} | |
74 | |
75 void ReturnResults(bool exists, const base::PlatformFileInfo& file_info) { | |
76 if (owner_) | |
77 owner_->DidResolve(exists, file_info); | |
78 } | |
79 | |
80 URLRequestFileJob* owner_; | |
81 | |
82 base::Lock lock_; | |
83 MessageLoop* owner_loop_; | |
84 }; | |
85 | 51 |
86 URLRequestFileJob::URLRequestFileJob(URLRequest* request, | 52 URLRequestFileJob::URLRequestFileJob(URLRequest* request, |
87 const FilePath& file_path) | 53 const FilePath& file_path) |
88 : URLRequestJob(request, request->context()->network_delegate()), | 54 : URLRequestJob(request, request->context()->network_delegate()), |
89 file_path_(file_path), | 55 file_path_(file_path), |
90 stream_(NULL), | 56 stream_(new FileStream(NULL)), |
91 is_directory_(false), | 57 remaining_bytes_(0), |
92 remaining_bytes_(0) { | 58 weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { |
93 } | 59 } |
94 | 60 |
95 // static | 61 // static |
96 URLRequestJob* URLRequestFileJob::Factory(URLRequest* request, | 62 URLRequestJob* URLRequestFileJob::Factory(URLRequest* request, |
97 const std::string& scheme) { | 63 const std::string& scheme) { |
98 FilePath file_path; | 64 FilePath file_path; |
99 const bool is_file = FileURLToFilePath(request->url(), &file_path); | 65 const bool is_file = FileURLToFilePath(request->url(), &file_path); |
100 | 66 |
101 // Check file access permissions. | 67 // Check file access permissions. |
102 if (!IsFileAccessAllowed(*request, file_path)) | 68 if (!IsFileAccessAllowed(*request, file_path)) |
103 return new URLRequestErrorJob(request, ERR_ACCESS_DENIED); | 69 return new URLRequestErrorJob(request, ERR_ACCESS_DENIED); |
104 | 70 |
105 // We need to decide whether to create URLRequestFileJob for file access or | 71 // We need to decide whether to create URLRequestFileJob for file access or |
106 // URLRequestFileDirJob for directory access. To avoid accessing the | 72 // URLRequestFileDirJob for directory access. To avoid accessing the |
107 // filesystem, we only look at the path string here. | 73 // filesystem, we only look at the path string here. |
108 // The code in the URLRequestFileJob::Start() method discovers that a path, | 74 // The code in the URLRequestFileJob::Start() method discovers that a path, |
109 // which doesn't end with a slash, should really be treated as a directory, | 75 // which doesn't end with a slash, should really be treated as a directory, |
110 // and it then redirects to the URLRequestFileDirJob. | 76 // and it then redirects to the URLRequestFileDirJob. |
111 if (is_file && | 77 if (is_file && |
112 file_util::EndsWithSeparator(file_path) && | 78 file_util::EndsWithSeparator(file_path) && |
113 file_path.IsAbsolute()) | 79 file_path.IsAbsolute()) |
114 return new URLRequestFileDirJob(request, file_path); | 80 return new URLRequestFileDirJob(request, file_path); |
115 | 81 |
116 // Use a regular file request job for all non-directories (including invalid | 82 // Use a regular file request job for all non-directories (including invalid |
117 // file names). | 83 // file names). |
118 return new URLRequestFileJob(request, file_path); | 84 return new URLRequestFileJob(request, file_path); |
119 } | 85 } |
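For illustration only (these URLs are hypothetical, not from the change), the checks in Factory() above classify requests like this:

    // file:///tmp/dir/   -> absolute path with trailing slash -> URLRequestFileDirJob
    // file:///tmp/a.txt  -> no trailing slash                 -> URLRequestFileJob
    // file:///tmp/dir    -> no trailing slash                 -> URLRequestFileJob,
    //                       which later issues the 301 redirect in
    //                       IsRedirectResponse() once it learns the path is
    //                       actually a directory.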
120 | 86 |
121 void URLRequestFileJob::Start() { | 87 void URLRequestFileJob::Start() { |
122 DCHECK(!async_resolver_); | 88 FileMetaInfo* meta_info = new FileMetaInfo; |
123 async_resolver_ = new AsyncResolver(this); | 89 base::WorkerPool::PostTaskAndReply( |
124 base::WorkerPool::PostTask( | |
125 FROM_HERE, | 90 FROM_HERE, |
126 base::Bind(&AsyncResolver::Resolve, async_resolver_.get(), file_path_), | 91 base::Bind(&URLRequestFileJob::FetchMetaInfo, file_path_, |
92 base::Unretained(meta_info)), | |
93 base::Bind(&URLRequestFileJob::DidFetchMetaInfo, | |
94 weak_ptr_factory_.GetWeakPtr(), | |
95 base::Owned(meta_info)), | |
127 true); | 96 true); |
128 } | 97 } |
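The ownership in the PostTaskAndReply() call above is worth spelling out. The following is the same call annotated with how the base helpers are being used here, assuming the 2012-era base API (WorkerPool::PostTaskAndReply, base::Owned, base::Unretained, WeakPtrFactory):

    FileMetaInfo* meta_info = new FileMetaInfo;
    base::WorkerPool::PostTaskAndReply(
        FROM_HERE,
        // Task, run on a worker thread. base::Unretained() is safe because
        // the reply below owns |meta_info| for at least as long as the task.
        base::Bind(&URLRequestFileJob::FetchMetaInfo, file_path_,
                   base::Unretained(meta_info)),
        // Reply, run back on the origin thread. base::Owned() deletes
        // |meta_info| once the reply is run or destroyed, and the WeakPtr
        // makes the reply a no-op after Kill() invalidates it.
        base::Bind(&URLRequestFileJob::DidFetchMetaInfo,
                   weak_ptr_factory_.GetWeakPtr(),
                   base::Owned(meta_info)),
        true /* task_is_slow */);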
129 | 98 |
130 void URLRequestFileJob::Kill() { | 99 void URLRequestFileJob::Kill() { |
131 // URL requests should not block on the disk! | 100 stream_.reset(); |
132 // http://code.google.com/p/chromium/issues/detail?id=59849 | 101 weak_ptr_factory_.InvalidateWeakPtrs(); |
133 base::ThreadRestrictions::ScopedAllowIO allow_io; | |
134 stream_.CloseSync(); | |
135 | |
136 if (async_resolver_) { | |
137 async_resolver_->Cancel(); | |
138 async_resolver_ = NULL; | |
139 } | |
140 | 102 |
141 URLRequestJob::Kill(); | 103 URLRequestJob::Kill(); |
142 } | 104 } |
143 | 105 |
144 bool URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size, | 106 bool URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size, |
145 int *bytes_read) { | 107 int *bytes_read) { |
146 DCHECK_NE(dest_size, 0); | 108 DCHECK_NE(dest_size, 0); |
147 DCHECK(bytes_read); | 109 DCHECK(bytes_read); |
148 DCHECK_GE(remaining_bytes_, 0); | 110 DCHECK_GE(remaining_bytes_, 0); |
149 | 111 |
150 if (remaining_bytes_ < dest_size) | 112 if (remaining_bytes_ < dest_size) |
151 dest_size = static_cast<int>(remaining_bytes_); | 113 dest_size = static_cast<int>(remaining_bytes_); |
152 | 114 |
153 // If we should copy zero bytes because |remaining_bytes_| is zero, short | 115 // If we should copy zero bytes because |remaining_bytes_| is zero, short |
154 // circuit here. | 116 // circuit here. |
155 if (!dest_size) { | 117 if (!dest_size) { |
156 *bytes_read = 0; | 118 *bytes_read = 0; |
157 return true; | 119 return true; |
158 } | 120 } |
159 | 121 |
160 int rv = stream_.Read(dest, dest_size, | 122 int rv = stream_->Read(dest, dest_size, |
161 base::Bind(&URLRequestFileJob::DidRead, | 123 base::Bind(&URLRequestFileJob::DidRead, |
162 base::Unretained(this))); | 124 weak_ptr_factory_.GetWeakPtr())); |
163 if (rv >= 0) { | 125 if (rv >= 0) { |
164 // Data is immediately available. | 126 // Data is immediately available. |
165 *bytes_read = rv; | 127 *bytes_read = rv; |
166 remaining_bytes_ -= rv; | 128 remaining_bytes_ -= rv; |
167 DCHECK_GE(remaining_bytes_, 0); | 129 DCHECK_GE(remaining_bytes_, 0); |
168 return true; | 130 return true; |
169 } | 131 } |
170 | 132 |
171 // Otherwise, a read error occurred. We may just need to wait... | 133 // Otherwise, a read error occurred. We may just need to wait... |
172 if (rv == ERR_IO_PENDING) { | 134 if (rv == ERR_IO_PENDING) { |
173 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); | 135 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
174 } else { | 136 } else { |
175 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | 137 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); |
176 } | 138 } |
177 return false; | 139 return false; |
178 } | 140 } |
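To summarize the reading protocol ReadRawData() implements above (a paraphrase of the code as written, not new behavior):

    // rv >= 0             : read completed synchronously; report *bytes_read
    //                       and return true.
    // rv == ERR_IO_PENDING: mark the request IO_PENDING and return false;
    //                       DidRead() will later clear the status and call
    //                       NotifyReadComplete().
    // any other rv        : a real error; NotifyDone(FAILED, rv) and return
    //                       false.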
179 | 141 |
180 bool URLRequestFileJob::IsRedirectResponse(GURL* location, | 142 bool URLRequestFileJob::IsRedirectResponse(GURL* location, |
181 int* http_status_code) { | 143 int* http_status_code) { |
182 if (is_directory_) { | 144 if (meta_info_.is_directory) { |
183 // This happens when we discovered the file is a directory, so needs a | 145 // This happens when we discovered the file is a directory, so needs a |
184 // slash at the end of the path. | 146 // slash at the end of the path. |
185 std::string new_path = request_->url().path(); | 147 std::string new_path = request_->url().path(); |
186 new_path.push_back('/'); | 148 new_path.push_back('/'); |
187 GURL::Replacements replacements; | 149 GURL::Replacements replacements; |
188 replacements.SetPathStr(new_path); | 150 replacements.SetPathStr(new_path); |
189 | 151 |
190 *location = request_->url().ReplaceComponents(replacements); | 152 *location = request_->url().ReplaceComponents(replacements); |
191 *http_status_code = 301; // simulate a permanent redirect | 153 *http_status_code = 301; // simulate a permanent redirect |
192 return true; | 154 return true; |
(...skipping 21 matching lines...) | |
214 #endif | 176 #endif |
215 } | 177 } |
216 | 178 |
217 Filter* URLRequestFileJob::SetupFilter() const { | 179 Filter* URLRequestFileJob::SetupFilter() const { |
218 // Bug 9936 - .svgz files need to be decompressed. | 180 // Bug 9936 - .svgz files need to be decompressed. |
219 return LowerCaseEqualsASCII(file_path_.Extension(), ".svgz") | 181 return LowerCaseEqualsASCII(file_path_.Extension(), ".svgz") |
220 ? Filter::GZipFactory() : NULL; | 182 ? Filter::GZipFactory() : NULL; |
221 } | 183 } |
222 | 184 |
223 bool URLRequestFileJob::GetMimeType(std::string* mime_type) const { | 185 bool URLRequestFileJob::GetMimeType(std::string* mime_type) const { |
224 // URL requests should not block on the disk! On Windows this goes to the | |
225 // registry. | |
226 // http://code.google.com/p/chromium/issues/detail?id=59849 | |
227 base::ThreadRestrictions::ScopedAllowIO allow_io; | |
228 DCHECK(request_); | 186 DCHECK(request_); |
229 return GetMimeTypeFromFile(file_path_, mime_type); | 187 if (meta_info_.mime_type_result) { |
188 *mime_type = meta_info_.mime_type; | |
189 return true; | |
190 } | |
191 return false; | |
230 } | 192 } |
231 | 193 |
232 void URLRequestFileJob::SetExtraRequestHeaders( | 194 void URLRequestFileJob::SetExtraRequestHeaders( |
233 const HttpRequestHeaders& headers) { | 195 const HttpRequestHeaders& headers) { |
234 std::string range_header; | 196 std::string range_header; |
235 if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) { | 197 if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) { |
236 // We only care about "Range" header here. | 198 // We only care about "Range" header here. |
237 std::vector<HttpByteRange> ranges; | 199 std::vector<HttpByteRange> ranges; |
238 if (HttpUtil::ParseRangeHeader(range_header, &ranges)) { | 200 if (HttpUtil::ParseRangeHeader(range_header, &ranges)) { |
239 if (ranges.size() == 1) { | 201 if (ranges.size() == 1) { |
(...skipping 16 matching lines...) | |
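As a worked example of the Range handling above (the numbers are illustrative): a request carrying "Range: bytes=100-199" parses to a single HttpByteRange, which DidOpen() later clamps against the file size fetched by FetchMetaInfo():

    std::vector<HttpByteRange> ranges;
    if (HttpUtil::ParseRangeHeader("bytes=100-199", &ranges) &&
        ranges.size() == 1) {
      HttpByteRange range = ranges[0];
      if (range.ComputeBounds(150)) {  // pretend the file is 150 bytes long
        // Bounds are clamped to [100, 149], so remaining_bytes_ becomes
        // 149 - 100 + 1 == 50.
      } else {
        // For a file of 100 bytes or fewer, ComputeBounds() fails and the
        // job reports ERR_REQUEST_RANGE_NOT_SATISFIABLE.
      }
    }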
256 const URLRequestContext* context = request.context(); | 218 const URLRequestContext* context = request.context(); |
257 if (!context) | 219 if (!context) |
258 return false; | 220 return false; |
259 const NetworkDelegate* delegate = context->network_delegate(); | 221 const NetworkDelegate* delegate = context->network_delegate(); |
260 if (delegate) | 222 if (delegate) |
261 return delegate->CanAccessFile(request, path); | 223 return delegate->CanAccessFile(request, path); |
262 return false; | 224 return false; |
263 } | 225 } |
264 | 226 |
265 URLRequestFileJob::~URLRequestFileJob() { | 227 URLRequestFileJob::~URLRequestFileJob() { |
266 DCHECK(!async_resolver_); | |
267 } | 228 } |
268 | 229 |
269 void URLRequestFileJob::DidResolve( | 230 void URLRequestFileJob::FetchMetaInfo(const FilePath& file_path, |
270 bool exists, const base::PlatformFileInfo& file_info) { | 231 FileMetaInfo* meta_info) { |
271 async_resolver_ = NULL; | 232 base::PlatformFileInfo platform_info; |
233 meta_info->file_exists = file_util::GetFileInfo(file_path, &platform_info); | |
234 meta_info->file_size = platform_info.size; | |
235 meta_info->is_directory = platform_info.is_directory; | |
236 // On Windows GetMimeTypeFromFile() goes to the registry. Thus it should be | |
237 // done in WorkerPool. | |
238 meta_info->mime_type_result = GetMimeTypeFromFile(file_path, | |
239 &meta_info->mime_type); | |
240 } | |
272 | 241 |
273 // We may have been orphaned... | 242 void URLRequestFileJob::DidFetchMetaInfo(const FileMetaInfo* meta_info) { |
274 if (!request_) | 243 meta_info_ = *meta_info; |
275 return; | |
276 | 244 |
277 is_directory_ = file_info.is_directory; | |
278 | |
279 int rv = OK; | |
280 // We use URLRequestFileJob to handle files as well as directories without | 245 // We use URLRequestFileJob to handle files as well as directories without |
281 // trailing slash. | 246 // trailing slash. |
282 // If a directory does not exist, we return ERR_FILE_NOT_FOUND. Otherwise, | 247 // If a directory does not exist, we return ERR_FILE_NOT_FOUND. Otherwise, |
283 // we will append trailing slash and redirect to FileDirJob. | 248 // we will append trailing slash and redirect to FileDirJob. |
284 // A special case is "\" on Windows. We should resolve as invalid. | 249 // A special case is "\" on Windows. We should resolve as invalid. |
285 // However, Windows resolves "\" to "C:\", thus reports it as existent. | 250 // However, Windows resolves "\" to "C:\", thus reports it as existent. |
286 // So what happens is we append it with trailing slash and redirect it to | 251 // So what happens is we append it with trailing slash and redirect it to |
287 // FileDirJob where it is resolved as invalid. | 252 // FileDirJob where it is resolved as invalid. |
288 if (!exists) { | 253 if (!meta_info_.file_exists) { |
289 rv = ERR_FILE_NOT_FOUND; | 254 DidOpen(ERR_FILE_NOT_FOUND); |
290 } else if (!is_directory_) { | 255 return; |
291 // URL requests should not block on the disk! | |
292 // http://code.google.com/p/chromium/issues/detail?id=59849 | |
293 base::ThreadRestrictions::ScopedAllowIO allow_io; | |
294 | |
295 int flags = base::PLATFORM_FILE_OPEN | | |
296 base::PLATFORM_FILE_READ | | |
297 base::PLATFORM_FILE_ASYNC; | |
298 rv = stream_.OpenSync(file_path_, flags); | |
299 } | 256 } |
300 | 257 if (meta_info_.is_directory) { |
301 if (rv != OK) { | 258 DidOpen(OK); |
302 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | |
303 return; | 259 return; |
304 } | 260 } |
305 | 261 |
306 if (!byte_range_.ComputeBounds(file_info.size)) { | 262 int flags = base::PLATFORM_FILE_OPEN | |
263 base::PLATFORM_FILE_READ | | |
264 base::PLATFORM_FILE_ASYNC; | |
265 int rv = stream_->Open(file_path_, flags, | |
266 base::Bind(&URLRequestFileJob::DidOpen, | |
267 weak_ptr_factory_.GetWeakPtr())); | |
268 if (rv != ERR_IO_PENDING) | |
269 DidOpen(rv); | |
270 } | |
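Note the idiom at the end of DidFetchMetaInfo() above, which recurs with Seek() in DidOpen(): when an asynchronous FileStream call completes synchronously it returns a value other than ERR_IO_PENDING and never runs the callback itself, so the caller invokes the callback directly. In generic form (|operation_callback| is a placeholder name):

    int rv = stream_->Open(file_path_, flags, operation_callback);
    if (rv != ERR_IO_PENDING)
      operation_callback.Run(rv);  // synchronous completion; the stream will
                                   // not invoke the callback on its own.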
271 | |
272 void URLRequestFileJob::DidOpen(int result) { | |
273 if (result != OK) { | |
274 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); | |
275 return; | |
276 } | |
277 | |
278 if (!byte_range_.ComputeBounds(meta_info_.file_size)) { | |
307 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | 279 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, |
308 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | 280 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); |
309 return; | 281 return; |
310 } | 282 } |
311 | 283 |
312 remaining_bytes_ = byte_range_.last_byte_position() - | 284 remaining_bytes_ = byte_range_.last_byte_position() - |
313 byte_range_.first_byte_position() + 1; | 285 byte_range_.first_byte_position() + 1; |
314 DCHECK_GE(remaining_bytes_, 0); | 286 DCHECK_GE(remaining_bytes_, 0); |
315 | 287 |
316 // URL requests should not block on the disk! | 288 if (remaining_bytes_ > 0 && byte_range_.first_byte_position() != 0) { |
317 // http://code.google.com/p/chromium/issues/detail?id=59849 | 289 int rv = stream_->Seek(FROM_BEGIN, byte_range_.first_byte_position(), |
318 { | 290 base::Bind(&URLRequestFileJob::DidSeek, |
319 base::ThreadRestrictions::ScopedAllowIO allow_io; | 291 weak_ptr_factory_.GetWeakPtr())); |
320 // Do the seek at the beginning of the request. | 292 if (rv != ERR_IO_PENDING) { |
321 if (remaining_bytes_ > 0 && | 293 // stream_.Seek() didn't finish successfully, so pass an intentionally |
wtc 2012/08/09 16:05:43: stream_.Seek => stream_->Seek | |
322 byte_range_.first_byte_position() != 0 && | 294 // erroneous value into DidSeek(). |
323 byte_range_.first_byte_position() != | 295 DidSeek(-1); |
324 stream_.SeekSync(FROM_BEGIN, byte_range_.first_byte_position())) { | |
325 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | |
326 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | |
327 return; | |
328 } | 296 } |
297 } else { | |
298 // We didn't need to call stream_.Seek() at all, so let's pretend we | |
299 // actually did that and the FileStream has positioned where we need it | |
300 // to be (it won't be true only when remaining_bytes_ <= 0). | |
301 DidSeek(byte_range_.first_byte_position()); | |
302 } | |
303 } | |
304 | |
305 void URLRequestFileJob::DidSeek(int64 result) { | |
306 if (result != byte_range_.first_byte_position()) { | |
307 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | |
308 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | |
309 return; | |
329 } | 310 } |
330 | 311 |
331 set_expected_content_size(remaining_bytes_); | 312 set_expected_content_size(remaining_bytes_); |
332 NotifyHeadersComplete(); | 313 NotifyHeadersComplete(); |
333 } | 314 } |
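Tracing one hypothetical ranged request through the new asynchronous path may make the DidOpen()/DidSeek() handshake clearer. For "Range: bytes=512-1023" on a 4096-byte file:

    // DidOpen(OK)  : ComputeBounds(4096) keeps [512, 1023];
    //                remaining_bytes_ = 1023 - 512 + 1 = 512.
    // Seek         : first_byte_position() is 512 (non-zero), so
    //                Seek(FROM_BEGIN, 512) is issued and, on success,
    //                DidSeek(512) runs.
    // DidSeek(512) : 512 == first_byte_position(), so the job sets
    //                expected_content_size to 512 and completes the headers.
    // Failure path : a synchronous Seek() error feeds DidSeek(-1), which can
    //                never equal a valid first_byte_position(), so the job
    //                fails with ERR_REQUEST_RANGE_NOT_SATISFIABLE.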
334 | 315 |
335 void URLRequestFileJob::DidRead(int result) { | 316 void URLRequestFileJob::DidRead(int result) { |
336 if (result > 0) { | 317 if (result > 0) { |
337 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status | 318 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status |
338 } else if (result == 0) { | 319 } else if (result == 0) { |
339 NotifyDone(URLRequestStatus()); | 320 NotifyDone(URLRequestStatus()); |
340 } else { | 321 } else { |
341 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); | 322 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); |
342 } | 323 } |
343 | 324 |
344 remaining_bytes_ -= result; | 325 remaining_bytes_ -= result; |
345 DCHECK_GE(remaining_bytes_, 0); | 326 DCHECK_GE(remaining_bytes_, 0); |
346 | 327 |
347 NotifyReadComplete(result); | 328 NotifyReadComplete(result); |
348 } | 329 } |
349 | 330 |
350 } // namespace net | 331 } // namespace net |