OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // For loading files, we make use of overlapped i/o to ensure that reading from | 5 // For loading files, we make use of overlapped i/o to ensure that reading from |
6 // the filesystem (e.g., a network filesystem) does not block the calling | 6 // the filesystem (e.g., a network filesystem) does not block the calling |
7 // thread. An alternative approach would be to use a background thread or pool | 7 // thread. An alternative approach would be to use a background thread or pool |
8 // of threads, but it seems better to leverage the operating system's ability | 8 // of threads, but it seems better to leverage the operating system's ability |
9 // to do background file reads for us. | 9 // to do background file reads for us. |
10 // | 10 // |
(...skipping 24 matching lines...) | |
35 #include "net/base/net_errors.h" | 35 #include "net/base/net_errors.h" |
36 #include "net/base/net_util.h" | 36 #include "net/base/net_util.h" |
37 #include "net/http/http_util.h" | 37 #include "net/http/http_util.h" |
38 #include "net/url_request/url_request.h" | 38 #include "net/url_request/url_request.h" |
39 #include "net/url_request/url_request_context.h" | 39 #include "net/url_request/url_request_context.h" |
40 #include "net/url_request/url_request_error_job.h" | 40 #include "net/url_request/url_request_error_job.h" |
41 #include "net/url_request/url_request_file_dir_job.h" | 41 #include "net/url_request/url_request_file_dir_job.h" |
42 | 42 |
43 namespace net { | 43 namespace net { |
44 | 44 |
45 class URLRequestFileJob::AsyncResolver | 45 URLRequestFileJob::FileMetaInfo::FileMetaInfo() |
46 : public base::RefCountedThreadSafe<URLRequestFileJob::AsyncResolver> { | 46 : file_size(-1), |
47 public: | 47 mime_type_result(false), |
48 explicit AsyncResolver(URLRequestFileJob* owner) | 48 file_exists(false), |
49 : owner_(owner), owner_loop_(MessageLoop::current()) { | 49 is_directory(false) { |
50 } | 50 } |
51 | |
52 void Resolve(const FilePath& file_path) { | |
53 base::PlatformFileInfo file_info; | |
54 bool exists = file_util::GetFileInfo(file_path, &file_info); | |
55 base::AutoLock locked(lock_); | |
56 if (owner_loop_) { | |
57 owner_loop_->PostTask( | |
58 FROM_HERE, | |
59 base::Bind(&AsyncResolver::ReturnResults, this, exists, file_info)); | |
60 } | |
61 } | |
62 | |
63 void Cancel() { | |
64 owner_ = NULL; | |
65 | |
66 base::AutoLock locked(lock_); | |
67 owner_loop_ = NULL; | |
68 } | |
69 | |
70 private: | |
71 friend class base::RefCountedThreadSafe<URLRequestFileJob::AsyncResolver>; | |
72 | |
73 ~AsyncResolver() {} | |
74 | |
75 void ReturnResults(bool exists, const base::PlatformFileInfo& file_info) { | |
76 if (owner_) | |
77 owner_->DidResolve(exists, file_info); | |
78 } | |
79 | |
80 URLRequestFileJob* owner_; | |
81 | |
82 base::Lock lock_; | |
83 MessageLoop* owner_loop_; | |
84 }; | |
85 | 51 |
86 URLRequestFileJob::URLRequestFileJob(URLRequest* request, | 52 URLRequestFileJob::URLRequestFileJob(URLRequest* request, |
87 const FilePath& file_path) | 53 const FilePath& file_path) |
88 : URLRequestJob(request, request->context()->network_delegate()), | 54 : URLRequestJob(request, request->context()->network_delegate()), |
89 file_path_(file_path), | 55 file_path_(file_path), |
90 stream_(NULL), | 56 stream_(new FileStream(NULL)), |
91 is_directory_(false), | 57 remaining_bytes_(0), |
92 remaining_bytes_(0) { | 58 weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { |
93 } | 59 } |
94 | 60 |
95 // static | 61 // static |
96 URLRequestJob* URLRequestFileJob::Factory(URLRequest* request, | 62 URLRequestJob* URLRequestFileJob::Factory(URLRequest* request, |
97 const std::string& scheme) { | 63 const std::string& scheme) { |
98 FilePath file_path; | 64 FilePath file_path; |
99 const bool is_file = FileURLToFilePath(request->url(), &file_path); | 65 const bool is_file = FileURLToFilePath(request->url(), &file_path); |
100 | 66 |
101 // Check file access permissions. | 67 // Check file access permissions. |
102 if (!IsFileAccessAllowed(*request, file_path)) | 68 if (!IsFileAccessAllowed(*request, file_path)) |
103 return new URLRequestErrorJob(request, ERR_ACCESS_DENIED); | 69 return new URLRequestErrorJob(request, ERR_ACCESS_DENIED); |
104 | 70 |
105 // We need to decide whether to create URLRequestFileJob for file access or | 71 // We need to decide whether to create URLRequestFileJob for file access or |
106 // URLRequestFileDirJob for directory access. To avoid accessing the | 72 // URLRequestFileDirJob for directory access. To avoid accessing the |
107 // filesystem, we only look at the path string here. | 73 // filesystem, we only look at the path string here. |
108 // The code in the URLRequestFileJob::Start() method discovers that a path, | 74 // The code in the URLRequestFileJob::Start() method discovers that a path, |
109 // which doesn't end with a slash, should really be treated as a directory, | 75 // which doesn't end with a slash, should really be treated as a directory, |
110 // and it then redirects to the URLRequestFileDirJob. | 76 // and it then redirects to the URLRequestFileDirJob. |
111 if (is_file && | 77 if (is_file && |
112 file_util::EndsWithSeparator(file_path) && | 78 file_util::EndsWithSeparator(file_path) && |
113 file_path.IsAbsolute()) | 79 file_path.IsAbsolute()) |
114 return new URLRequestFileDirJob(request, file_path); | 80 return new URLRequestFileDirJob(request, file_path); |
115 | 81 |
116 // Use a regular file request job for all non-directories (including invalid | 82 // Use a regular file request job for all non-directories (including invalid |
117 // file names). | 83 // file names). |
118 return new URLRequestFileJob(request, file_path); | 84 return new URLRequestFileJob(request, file_path); |
119 } | 85 } |
120 | 86 |
121 void URLRequestFileJob::Start() { | 87 void URLRequestFileJob::Start() { |
122 DCHECK(!async_resolver_); | 88 FileMetaInfo* meta_info = new FileMetaInfo; |
123 async_resolver_ = new AsyncResolver(this); | 89 base::WorkerPool::PostTaskAndReply( |
124 base::WorkerPool::PostTask( | |
125 FROM_HERE, | 90 FROM_HERE, |
126 base::Bind(&AsyncResolver::Resolve, async_resolver_.get(), file_path_), | 91 base::Bind(&URLRequestFileJob::FetchMetaInfo, file_path_, |
92 base::Unretained(meta_info)), | |
93 base::Bind(&URLRequestFileJob::DidFetchMetaInfo, | |
94 weak_ptr_factory_.GetWeakPtr(), | |
95 base::Owned(meta_info)), | |
127 true); | 96 true); |
128 } | 97 } |
129 | 98 |
130 void URLRequestFileJob::Kill() { | 99 void URLRequestFileJob::Kill() { |
131 // URL requests should not block on the disk! | 100 stream_.reset(); |
wtc 2012/08/14 18:00:40: Could you explain why it is necessary to delete th…
pivanof 2012/08/15 06:36:50: We can't call CloseSync() because it will make IO…
| |
132 // http://code.google.com/p/chromium/issues/detail?id=59849 | 101 weak_ptr_factory_.InvalidateWeakPtrs(); |
133 base::ThreadRestrictions::ScopedAllowIO allow_io; | |
134 stream_.CloseSync(); | |
135 | |
136 if (async_resolver_) { | |
137 async_resolver_->Cancel(); | |
138 async_resolver_ = NULL; | |
139 } | |
140 | 102 |
141 URLRequestJob::Kill(); | 103 URLRequestJob::Kill(); |
142 } | 104 } |
143 | 105 |
144 bool URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size, | 106 bool URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size, |
145 int *bytes_read) { | 107 int *bytes_read) { |
146 DCHECK_NE(dest_size, 0); | 108 DCHECK_NE(dest_size, 0); |
147 DCHECK(bytes_read); | 109 DCHECK(bytes_read); |
148 DCHECK_GE(remaining_bytes_, 0); | 110 DCHECK_GE(remaining_bytes_, 0); |
149 | 111 |
150 if (remaining_bytes_ < dest_size) | 112 if (remaining_bytes_ < dest_size) |
151 dest_size = static_cast<int>(remaining_bytes_); | 113 dest_size = static_cast<int>(remaining_bytes_); |
152 | 114 |
153 // If we should copy zero bytes because |remaining_bytes_| is zero, short | 115 // If we should copy zero bytes because |remaining_bytes_| is zero, short |
154 // circuit here. | 116 // circuit here. |
155 if (!dest_size) { | 117 if (!dest_size) { |
156 *bytes_read = 0; | 118 *bytes_read = 0; |
157 return true; | 119 return true; |
158 } | 120 } |
159 | 121 |
160 int rv = stream_.Read(dest, dest_size, | 122 int rv = stream_->Read(dest, dest_size, |
161 base::Bind(&URLRequestFileJob::DidRead, | 123 base::Bind(&URLRequestFileJob::DidRead, |
162 base::Unretained(this))); | 124 weak_ptr_factory_.GetWeakPtr())); |
163 if (rv >= 0) { | 125 if (rv >= 0) { |
164 // Data is immediately available. | 126 // Data is immediately available. |
165 *bytes_read = rv; | 127 *bytes_read = rv; |
166 remaining_bytes_ -= rv; | 128 remaining_bytes_ -= rv; |
167 DCHECK_GE(remaining_bytes_, 0); | 129 DCHECK_GE(remaining_bytes_, 0); |
168 return true; | 130 return true; |
169 } | 131 } |
170 | 132 |
171 // Otherwise, a read error occurred. We may just need to wait... | 133 // Otherwise, a read error occurred. We may just need to wait... |
172 if (rv == ERR_IO_PENDING) { | 134 if (rv == ERR_IO_PENDING) { |
173 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); | 135 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
174 } else { | 136 } else { |
175 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | 137 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); |
176 } | 138 } |
177 return false; | 139 return false; |
178 } | 140 } |
179 | 141 |
180 bool URLRequestFileJob::IsRedirectResponse(GURL* location, | 142 bool URLRequestFileJob::IsRedirectResponse(GURL* location, |
181 int* http_status_code) { | 143 int* http_status_code) { |
182 if (is_directory_) { | 144 if (meta_info_.is_directory) { |
183 // This happens when we discovered the file is a directory, so it needs a | 145 // This happens when we discovered the file is a directory, so it needs a |
184 // slash at the end of the path. | 146 // slash at the end of the path. |
185 std::string new_path = request_->url().path(); | 147 std::string new_path = request_->url().path(); |
186 new_path.push_back('/'); | 148 new_path.push_back('/'); |
187 GURL::Replacements replacements; | 149 GURL::Replacements replacements; |
188 replacements.SetPathStr(new_path); | 150 replacements.SetPathStr(new_path); |
189 | 151 |
190 *location = request_->url().ReplaceComponents(replacements); | 152 *location = request_->url().ReplaceComponents(replacements); |
191 *http_status_code = 301; // simulate a permanent redirect | 153 *http_status_code = 301; // simulate a permanent redirect |
192 return true; | 154 return true; |
(...skipping 21 matching lines...) | |
214 #endif | 176 #endif |
215 } | 177 } |
216 | 178 |
217 Filter* URLRequestFileJob::SetupFilter() const { | 179 Filter* URLRequestFileJob::SetupFilter() const { |
218 // Bug 9936 - .svgz files need to be decompressed. | 180 // Bug 9936 - .svgz files need to be decompressed. |
219 return LowerCaseEqualsASCII(file_path_.Extension(), ".svgz") | 181 return LowerCaseEqualsASCII(file_path_.Extension(), ".svgz") |
220 ? Filter::GZipFactory() : NULL; | 182 ? Filter::GZipFactory() : NULL; |
221 } | 183 } |
222 | 184 |
223 bool URLRequestFileJob::GetMimeType(std::string* mime_type) const { | 185 bool URLRequestFileJob::GetMimeType(std::string* mime_type) const { |
224 // URL requests should not block on the disk! On Windows this goes to the | |
225 // registry. | |
226 // http://code.google.com/p/chromium/issues/detail?id=59849 | |
227 base::ThreadRestrictions::ScopedAllowIO allow_io; | |
228 DCHECK(request_); | 186 DCHECK(request_); |
229 return GetMimeTypeFromFile(file_path_, mime_type); | 187 if (meta_info_.mime_type_result) { |
188 *mime_type = meta_info_.mime_type; | |
189 return true; | |
190 } | |
191 return false; | |
wtc 2012/08/14 18:00:40: Nit: this can be rewritten as: if (meta_info_.m…
| |
230 } | 192 } |
231 | 193 |
232 void URLRequestFileJob::SetExtraRequestHeaders( | 194 void URLRequestFileJob::SetExtraRequestHeaders( |
233 const HttpRequestHeaders& headers) { | 195 const HttpRequestHeaders& headers) { |
234 std::string range_header; | 196 std::string range_header; |
235 if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) { | 197 if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) { |
236 // We only care about "Range" header here. | 198 // We only care about "Range" header here. |
237 std::vector<HttpByteRange> ranges; | 199 std::vector<HttpByteRange> ranges; |
238 if (HttpUtil::ParseRangeHeader(range_header, &ranges)) { | 200 if (HttpUtil::ParseRangeHeader(range_header, &ranges)) { |
239 if (ranges.size() == 1) { | 201 if (ranges.size() == 1) { |
(...skipping 13 matching lines...) | |
253 // static | 215 // static |
254 bool URLRequestFileJob::IsFileAccessAllowed(const URLRequest& request, | 216 bool URLRequestFileJob::IsFileAccessAllowed(const URLRequest& request, |
255 const FilePath& path) { | 217 const FilePath& path) { |
256 const NetworkDelegate* delegate = request.context()->network_delegate(); | 218 const NetworkDelegate* delegate = request.context()->network_delegate(); |
257 if (delegate) | 219 if (delegate) |
258 return delegate->CanAccessFile(request, path); | 220 return delegate->CanAccessFile(request, path); |
259 return false; | 221 return false; |
260 } | 222 } |
261 | 223 |
262 URLRequestFileJob::~URLRequestFileJob() { | 224 URLRequestFileJob::~URLRequestFileJob() { |
263 DCHECK(!async_resolver_); | |
264 } | 225 } |
265 | 226 |
266 void URLRequestFileJob::DidResolve( | 227 void URLRequestFileJob::FetchMetaInfo(const FilePath& file_path, |
267 bool exists, const base::PlatformFileInfo& file_info) { | 228 FileMetaInfo* meta_info) { |
268 async_resolver_ = NULL; | 229 base::PlatformFileInfo platform_info; |
230 meta_info->file_exists = file_util::GetFileInfo(file_path, &platform_info); | |
231 meta_info->file_size = platform_info.size; | |
232 meta_info->is_directory = platform_info.is_directory; | |
wtc 2012/08/14 18:00:40: It seems that platform_info may contain junk if th…
wtc 2012/11/14 21:05:01: This change is probably worth making. I am not 100…
wtc 2012/11/17 02:15:29: pivanof wrote:…
| |
233 // On Windows GetMimeTypeFromFile() goes to the registry. Thus it should be | |
234 // done in WorkerPool. | |
235 meta_info->mime_type_result = GetMimeTypeFromFile(file_path, | |
236 &meta_info->mime_type); | |
237 } | |
269 | 238 |
270 // We may have been orphaned... | 239 void URLRequestFileJob::DidFetchMetaInfo(const FileMetaInfo* meta_info) { |
271 if (!request_) | 240 meta_info_ = *meta_info; |
272 return; | |
273 | 241 |
274 is_directory_ = file_info.is_directory; | |
275 | |
276 int rv = OK; | |
277 // We use URLRequestFileJob to handle files as well as directories without | 242 // We use URLRequestFileJob to handle files as well as directories without |
278 // trailing slash. | 243 // trailing slash. |
279 // If a directory does not exist, we return ERR_FILE_NOT_FOUND. Otherwise, | 244 // If a directory does not exist, we return ERR_FILE_NOT_FOUND. Otherwise, |
280 // we will append trailing slash and redirect to FileDirJob. | 245 // we will append trailing slash and redirect to FileDirJob. |
281 // A special case is "\" on Windows. We should resolve as invalid. | 246 // A special case is "\" on Windows. We should resolve as invalid. |
282 // However, Windows resolves "\" to "C:\", thus reports it as existent. | 247 // However, Windows resolves "\" to "C:\", thus reports it as existent. |
283 // So what happens is we append it with trailing slash and redirect it to | 248 // So what happens is we append it with trailing slash and redirect it to |
284 // FileDirJob where it is resolved as invalid. | 249 // FileDirJob where it is resolved as invalid. |
285 if (!exists) { | 250 if (!meta_info_.file_exists) { |
286 rv = ERR_FILE_NOT_FOUND; | 251 DidOpen(ERR_FILE_NOT_FOUND); |
287 } else if (!is_directory_) { | 252 return; |
288 // URL requests should not block on the disk! | |
289 // http://code.google.com/p/chromium/issues/detail?id=59849 | |
290 base::ThreadRestrictions::ScopedAllowIO allow_io; | |
291 | |
292 int flags = base::PLATFORM_FILE_OPEN | | |
293 base::PLATFORM_FILE_READ | | |
294 base::PLATFORM_FILE_ASYNC; | |
295 rv = stream_.OpenSync(file_path_, flags); | |
296 } | 253 } |
297 | 254 if (meta_info_.is_directory) { |
298 if (rv != OK) { | 255 DidOpen(OK); |
299 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | |
300 return; | 256 return; |
301 } | 257 } |
302 | 258 |
303 if (!byte_range_.ComputeBounds(file_info.size)) { | 259 int flags = base::PLATFORM_FILE_OPEN | |
260 base::PLATFORM_FILE_READ | | |
261 base::PLATFORM_FILE_ASYNC; | |
262 int rv = stream_->Open(file_path_, flags, | |
263 base::Bind(&URLRequestFileJob::DidOpen, | |
264 weak_ptr_factory_.GetWeakPtr())); | |
265 if (rv != ERR_IO_PENDING) | |
266 DidOpen(rv); | |
267 } | |
268 | |
269 void URLRequestFileJob::DidOpen(int result) { | |
270 if (result != OK) { | |
271 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); | |
272 return; | |
273 } | |
274 | |
275 if (!byte_range_.ComputeBounds(meta_info_.file_size)) { | |
304 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | 276 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, |
305 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | 277 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); |
306 return; | 278 return; |
307 } | 279 } |
308 | 280 |
309 remaining_bytes_ = byte_range_.last_byte_position() - | 281 remaining_bytes_ = byte_range_.last_byte_position() - |
310 byte_range_.first_byte_position() + 1; | 282 byte_range_.first_byte_position() + 1; |
311 DCHECK_GE(remaining_bytes_, 0); | 283 DCHECK_GE(remaining_bytes_, 0); |
312 | 284 |
313 // URL requests should not block on the disk! | 285 if (remaining_bytes_ > 0 && byte_range_.first_byte_position() != 0) { |
314 // http://code.google.com/p/chromium/issues/detail?id=59849 | 286 int rv = stream_->Seek(FROM_BEGIN, byte_range_.first_byte_position(), |
315 { | 287 base::Bind(&URLRequestFileJob::DidSeek, |
316 base::ThreadRestrictions::ScopedAllowIO allow_io; | 288 weak_ptr_factory_.GetWeakPtr())); |
317 // Do the seek at the beginning of the request. | 289 if (rv != ERR_IO_PENDING) { |
318 if (remaining_bytes_ > 0 && | 290 // stream_->Seek() didn't finish successfully, so pass an intentionally |
wtc 2012/08/14 18:00:40: Nit: didn't finish successfully => failed
| |
319 byte_range_.first_byte_position() != 0 && | 291 // erroneous value into DidSeek(). |
320 byte_range_.first_byte_position() != | 292 DidSeek(-1); |
321 stream_.SeekSync(FROM_BEGIN, byte_range_.first_byte_position())) { | |
322 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | |
323 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | |
324 return; | |
325 } | 293 } |
294 } else { | |
295 // We didn't need to call stream_->Seek() at all, so let's pretend we | |
296 // actually did that and the FileStream has positioned where we need it | |
297 // to be (it won't be true only when remaining_bytes_ <= 0). | |
wtc 2012/08/14 18:00:40: I don't understand the sentence in parentheses (…
pivanof 2012/08/15 06:36:50: What I meant is that statement before parenthesis…
wtc 2012/08/17 00:24:04: I suggest something like this: // We didn't need…
| |
298 DidSeek(byte_range_.first_byte_position()); | |
299 } | |
300 } | |
301 | |
302 void URLRequestFileJob::DidSeek(int64 result) { | |
303 if (result != byte_range_.first_byte_position()) { | |
304 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | |
305 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); | |
306 return; | |
326 } | 307 } |
327 | 308 |
328 set_expected_content_size(remaining_bytes_); | 309 set_expected_content_size(remaining_bytes_); |
329 NotifyHeadersComplete(); | 310 NotifyHeadersComplete(); |
330 } | 311 } |
331 | 312 |
332 void URLRequestFileJob::DidRead(int result) { | 313 void URLRequestFileJob::DidRead(int result) { |
333 if (result > 0) { | 314 if (result > 0) { |
334 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status | 315 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status |
335 } else if (result == 0) { | 316 } else if (result == 0) { |
336 NotifyDone(URLRequestStatus()); | 317 NotifyDone(URLRequestStatus()); |
337 } else { | 318 } else { |
338 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); | 319 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); |
339 } | 320 } |
340 | 321 |
341 remaining_bytes_ -= result; | 322 remaining_bytes_ -= result; |
342 DCHECK_GE(remaining_bytes_, 0); | 323 DCHECK_GE(remaining_bytes_, 0); |
343 | 324 |
344 NotifyReadComplete(result); | 325 NotifyReadComplete(result); |
345 } | 326 } |
346 | 327 |
347 } // namespace net | 328 } // namespace net |
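
For reference, the sketch below shows in isolation how base::WorkerPool::PostTaskAndReply, base::Owned and base::WeakPtrFactory replace the old AsyncResolver. It is a hypothetical, minimal illustration rather than part of the CL: the MetaFetcher class and its FetchInfo/DidFetchInfo methods are invented names, and the includes assume the 2012-era base/ headers; only the base:: calls themselves are taken from the new Start() and Kill() above.

// Hypothetical, self-contained sketch (not part of this CL) of the
// PostTaskAndReply pattern used by the new URLRequestFileJob::Start().
// MetaFetcher, FetchInfo and DidFetchInfo are invented names; the base::
// calls mirror the ones in the new code above.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/compiler_specific.h"
#include "base/file_path.h"
#include "base/file_util.h"
#include "base/location.h"
#include "base/memory/weak_ptr.h"
#include "base/platform_file.h"
#include "base/threading/worker_pool.h"

class MetaFetcher {
 public:
  MetaFetcher() : weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {}

  void Start(const FilePath& path) {
    // Allocated here, filled on a worker thread, and deleted by base::Owned()
    // once the reply callback has run or been destroyed without running.
    base::PlatformFileInfo* info = new base::PlatformFileInfo;
    base::WorkerPool::PostTaskAndReply(
        FROM_HERE,
        base::Bind(&MetaFetcher::FetchInfo, path, base::Unretained(info)),
        base::Bind(&MetaFetcher::DidFetchInfo,
                   weak_ptr_factory_.GetWeakPtr(),
                   base::Owned(info)),
        true /* task_is_slow */);
  }

  void Cancel() {
    // Invalidating the weak pointers is enough to cancel: the worker task may
    // still run, but it only touches the heap-allocated result, and the reply
    // bound to the weak pointer is never delivered to |this|.
    weak_ptr_factory_.InvalidateWeakPtrs();
  }

 private:
  // Static so it can run on the worker thread without touching |this|.
  static void FetchInfo(const FilePath& path, base::PlatformFileInfo* info) {
    file_util::GetFileInfo(path, info);
  }

  // Runs on the posting thread, and only while the weak pointer is valid.
  void DidFetchInfo(const base::PlatformFileInfo* info) {
    // Consume the result here, e.g. info->size or info->is_directory.
  }

  base::WeakPtrFactory<MetaFetcher> weak_ptr_factory_;
};

This is also why the new Kill() no longer needs ScopedAllowIO or a separate Cancel() path: InvalidateWeakPtrs() drops the metadata reply as well as any pending FileStream Open/Seek/Read callbacks, since they are all bound through weak_ptr_factory_.GetWeakPtr().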