Chromium Code Reviews

Unified Diff: net/url_request/url_request_file_job.cc

Issue 5755004: net: Add namespace net to URLRequestFileJob. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 10 years ago
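
The change follows the usual pattern for moving an implementation file into a library namespace: the body of the .cc file is wrapped in namespace net { ... } and the net:: qualifiers that become redundant inside it are dropped. Below is a minimal, self-contained sketch of that pattern (not part of the CL; the class names are hypothetical stand-ins), followed by the actual diff.

// Hypothetical sketch illustrating the namespace pattern applied by this CL.
// Before: the code sits in the global namespace, so every net type has to be
// fully qualified as net::Something. After: the whole .cc body is wrapped in
// the namespace and the qualifiers are dropped, as the diff below does for
// URLRequestFileJob.

#include <string>

namespace net {

// Stand-ins for the real net classes; names are illustrative only.
class URLRequest {};

class URLRequestJob {
 public:
  explicit URLRequestJob(URLRequest* request) : request_(request) {}
  virtual ~URLRequestJob() {}

 private:
  URLRequest* request_;
};

// Previously spelled "net::URLRequestJob* Factory(net::URLRequest* ...)".
// Inside namespace net the qualifiers are no longer needed.
URLRequestJob* CreateExampleJob(URLRequest* request,
                                const std::string& /* scheme */) {
  return new URLRequestJob(request);
}

}  // namespace net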
-// Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 // For loading files, we make use of overlapped i/o to ensure that reading from
 // the filesystem (e.g., a network filesystem) does not block the calling
 // thread. An alternative approach would be to use a background thread or pool
 // of threads, but it seems better to leverage the operating system's ability
 // to do background file reads for us.
 //
 // Since overlapped reads require a 'static' buffer for the duration of the
(...skipping 21 matching lines...)
33 #include "net/base/net_util.h" 33 #include "net/base/net_util.h"
34 #include "net/http/http_util.h" 34 #include "net/http/http_util.h"
35 #include "net/url_request/url_request.h" 35 #include "net/url_request/url_request.h"
36 #include "net/url_request/url_request_error_job.h" 36 #include "net/url_request/url_request_error_job.h"
37 #include "net/url_request/url_request_file_dir_job.h" 37 #include "net/url_request/url_request_file_dir_job.h"
38 38
39 #if defined(OS_WIN) 39 #if defined(OS_WIN)
40 #include "base/worker_pool.h" 40 #include "base/worker_pool.h"
41 #endif 41 #endif
42 42
43 namespace net {
44
43 #if defined(OS_WIN) 45 #if defined(OS_WIN)
44 class URLRequestFileJob::AsyncResolver 46 class URLRequestFileJob::AsyncResolver
45 : public base::RefCountedThreadSafe<URLRequestFileJob::AsyncResolver> { 47 : public base::RefCountedThreadSafe<URLRequestFileJob::AsyncResolver> {
46 public: 48 public:
47 explicit AsyncResolver(URLRequestFileJob* owner) 49 explicit AsyncResolver(URLRequestFileJob* owner)
48 : owner_(owner), owner_loop_(MessageLoop::current()) { 50 : owner_(owner), owner_loop_(MessageLoop::current()) {
49 } 51 }
50 52
51 void Resolve(const FilePath& file_path) { 53 void Resolve(const FilePath& file_path) {
52 base::PlatformFileInfo file_info; 54 base::PlatformFileInfo file_info;
(...skipping 23 matching lines...) Expand all
   }

   URLRequestFileJob* owner_;

   Lock lock_;
   MessageLoop* owner_loop_;
 };
 #endif

 // static
-net::URLRequestJob* URLRequestFileJob::Factory(net::URLRequest* request,
+URLRequestJob* URLRequestFileJob::Factory(URLRequest* request,
                                           const std::string& scheme) {

   FilePath file_path;
-  const bool is_file = net::FileURLToFilePath(request->url(), &file_path);
+  const bool is_file = FileURLToFilePath(request->url(), &file_path);

 #if defined(OS_CHROMEOS)
   // Check file access.
   if (AccessDisabled(file_path))
-    return new URLRequestErrorJob(request, net::ERR_ACCESS_DENIED);
+    return new URLRequestErrorJob(request, ERR_ACCESS_DENIED);
 #endif

   // We need to decide whether to create URLRequestFileJob for file access or
   // URLRequestFileDirJob for directory access. To avoid accessing the
   // filesystem, we only look at the path string here.
   // The code in the URLRequestFileJob::Start() method discovers that a path,
   // which doesn't end with a slash, should really be treated as a directory,
   // and it then redirects to the URLRequestFileDirJob.
   if (is_file &&
       file_util::EndsWithSeparator(file_path) &&
       file_path.IsAbsolute())
     return new URLRequestFileDirJob(request, file_path);

   // Use a regular file request job for all non-directories (including invalid
   // file names).
   return new URLRequestFileJob(request, file_path);
 }

-URLRequestFileJob::URLRequestFileJob(net::URLRequest* request,
+URLRequestFileJob::URLRequestFileJob(URLRequest* request,
                                      const FilePath& file_path)
-    : net::URLRequestJob(request),
+    : URLRequestJob(request),
       file_path_(file_path),
       ALLOW_THIS_IN_INITIALIZER_LIST(
           io_callback_(this, &URLRequestFileJob::DidRead)),
       is_directory_(false),
       remaining_bytes_(0),
       ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) {
 }

 URLRequestFileJob::~URLRequestFileJob() {
 #if defined(OS_WIN)
(...skipping 32 matching lines...)
 void URLRequestFileJob::Kill() {
   stream_.Close();

 #if defined(OS_WIN)
   if (async_resolver_) {
     async_resolver_->Cancel();
     async_resolver_ = NULL;
   }
 #endif

-  net::URLRequestJob::Kill();
+  URLRequestJob::Kill();
   method_factory_.RevokeAll();
 }

-bool URLRequestFileJob::ReadRawData(net::IOBuffer* dest, int dest_size,
+bool URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size,
                                     int *bytes_read) {
   DCHECK_NE(dest_size, 0);
   DCHECK(bytes_read);
   DCHECK_GE(remaining_bytes_, 0);

   if (remaining_bytes_ < dest_size)
     dest_size = static_cast<int>(remaining_bytes_);

   // If we should copy zero bytes because |remaining_bytes_| is zero, short
   // circuit here.
   if (!dest_size) {
     *bytes_read = 0;
     return true;
   }

   int rv = stream_.Read(dest->data(), dest_size, &io_callback_);
   if (rv >= 0) {
     // Data is immediately available.
     *bytes_read = rv;
     remaining_bytes_ -= rv;
     DCHECK_GE(remaining_bytes_, 0);
     return true;
   }

   // Otherwise, a read error occured. We may just need to wait...
-  if (rv == net::ERR_IO_PENDING) {
+  if (rv == ERR_IO_PENDING) {
     SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
   } else {
     NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
   }
   return false;
 }

 bool URLRequestFileJob::GetContentEncodings(
     std::vector<Filter::FilterType>* encoding_types) {
   DCHECK(encoding_types->empty());

   // Bug 9936 - .svgz files needs to be decompressed.
   if (LowerCaseEqualsASCII(file_path_.Extension(), ".svgz"))
     encoding_types->push_back(Filter::FILTER_TYPE_GZIP);

   return !encoding_types->empty();
 }

 bool URLRequestFileJob::GetMimeType(std::string* mime_type) const {
   // URL requests should not block on the disk! On Windows this goes to the
   // registry.
   // http://code.google.com/p/chromium/issues/detail?id=59849
   base::ThreadRestrictions::ScopedAllowIO allow_io;
   DCHECK(request_);
-  return net::GetMimeTypeFromFile(file_path_, mime_type);
+  return GetMimeTypeFromFile(file_path_, mime_type);
 }

 void URLRequestFileJob::SetExtraRequestHeaders(
-    const net::HttpRequestHeaders& headers) {
+    const HttpRequestHeaders& headers) {
   std::string range_header;
-  if (headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header)) {
+  if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) {
     // We only care about "Range" header here.
-    std::vector<net::HttpByteRange> ranges;
-    if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) {
+    std::vector<HttpByteRange> ranges;
+    if (HttpUtil::ParseRangeHeader(range_header, &ranges)) {
       if (ranges.size() == 1) {
         byte_range_ = ranges[0];
       } else {
         // We don't support multiple range requests in one single URL request,
         // because we need to do multipart encoding here.
         // TODO(hclam): decide whether we want to support multiple range
         // requests.
         NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
-                   net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+                   ERR_REQUEST_RANGE_NOT_SATISFIABLE));
       }
     }
   }
 }

 void URLRequestFileJob::DidResolve(
     bool exists, const base::PlatformFileInfo& file_info) {
 #if defined(OS_WIN)
   async_resolver_ = NULL;
 #endif

   // We may have been orphaned...
   if (!request_)
     return;

   is_directory_ = file_info.is_directory;

-  int rv = net::OK;
+  int rv = OK;
   // We use URLRequestFileJob to handle files as well as directories without
   // trailing slash.
   // If a directory does not exist, we return ERR_FILE_NOT_FOUND. Otherwise,
   // we will append trailing slash and redirect to FileDirJob.
   // A special case is "\" on Windows. We should resolve as invalid.
   // However, Windows resolves "\" to "C:\", thus reports it as existent.
   // So what happens is we append it with trailing slash and redirect it to
   // FileDirJob where it is resolved as invalid.
   if (!exists) {
-    rv = net::ERR_FILE_NOT_FOUND;
+    rv = ERR_FILE_NOT_FOUND;
   } else if (!is_directory_) {
     // URL requests should not block on the disk!
     // http://code.google.com/p/chromium/issues/detail?id=59849
     base::ThreadRestrictions::ScopedAllowIO allow_io;

     int flags = base::PLATFORM_FILE_OPEN |
                 base::PLATFORM_FILE_READ |
                 base::PLATFORM_FILE_ASYNC;
     rv = stream_.Open(file_path_, flags);
   }

-  if (rv != net::OK) {
+  if (rv != OK) {
     NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
     return;
   }

   if (!byte_range_.ComputeBounds(file_info.size)) {
     NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
-               net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+               ERR_REQUEST_RANGE_NOT_SATISFIABLE));
     return;
   }

   remaining_bytes_ = byte_range_.last_byte_position() -
                      byte_range_.first_byte_position() + 1;
   DCHECK_GE(remaining_bytes_, 0);

   // Do the seek at the beginning of the request.
   if (remaining_bytes_ > 0 &&
       byte_range_.first_byte_position() != 0 &&
       byte_range_.first_byte_position() !=
-          stream_.Seek(net::FROM_BEGIN, byte_range_.first_byte_position())) {
+          stream_.Seek(FROM_BEGIN, byte_range_.first_byte_position())) {
     NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
-               net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+               ERR_REQUEST_RANGE_NOT_SATISFIABLE));
     return;
   }

   set_expected_content_size(remaining_bytes_);
   NotifyHeadersComplete();
 }

 void URLRequestFileJob::DidRead(int result) {
   if (result > 0) {
     SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
(...skipping 31 matching lines...)
     return false;

   FilePath new_path = file_path_;
   bool resolved;
   resolved = file_util::ResolveShortcut(&new_path);

   // If shortcut is not resolved succesfully, do not redirect.
   if (!resolved)
     return false;

-  *location = net::FilePathToFileURL(new_path);
+  *location = FilePathToFileURL(new_path);
   *http_status_code = 301;
   return true;
 #else
   return false;
 #endif
 }

 #if defined(OS_CHROMEOS)
 static const char* const kLocalAccessWhiteList[] = {
   "/home/chronos/user/Downloads",
   "/mnt/partner_partition",
   "/usr/share/chromeos-assets",
   "/tmp",
   "/var/log",
 };

 // static
 bool URLRequestFileJob::AccessDisabled(const FilePath& file_path) {
-  if (net::URLRequest::IsFileAccessAllowed()) {  // for tests.
+  if (URLRequest::IsFileAccessAllowed()) {  // for tests.
     return false;
   }

   for (size_t i = 0; i < arraysize(kLocalAccessWhiteList); ++i) {
     const FilePath white_listed_path(kLocalAccessWhiteList[i]);
     // FilePath::operator== should probably handle trailing seperators.
     if (white_listed_path == file_path.StripTrailingSeparators() ||
         white_listed_path.IsParent(file_path)) {
       return false;
     }
   }
   return true;
 }
 #endif
+
+}  // namespace net
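
For context on the Chrome OS check near the end of the diff: AccessDisabled() permits a path only if, after stripping trailing separators, it equals one of the whitelisted directories or lies beneath one of them. The following stand-alone sketch restates that rule with plain std::string paths; it is an illustration only, not the CL's code, and it ignores the normalization that the real FilePath::IsParent() performs.

#include <string>

// Hypothetical helper mirroring the whitelist rule in
// URLRequestFileJob::AccessDisabled(), using std::string instead of FilePath.
bool IsLocalAccessAllowed(std::string path) {
  static const char* const kWhiteList[] = {
    "/home/chronos/user/Downloads",
    "/mnt/partner_partition",
    "/usr/share/chromeos-assets",
    "/tmp",
    "/var/log",
  };

  // Rough equivalent of FilePath::StripTrailingSeparators().
  while (path.size() > 1 && path[path.size() - 1] == '/')
    path.erase(path.size() - 1);

  for (size_t i = 0; i < sizeof(kWhiteList) / sizeof(kWhiteList[0]); ++i) {
    const std::string dir(kWhiteList[i]);
    // Allowed if the path is the whitelisted directory itself or a child.
    if (path == dir || path.compare(0, dir.size() + 1, dir + "/") == 0)
      return true;
  }
  return false;  // AccessDisabled() would return true in this case.
}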
