Chromium Code Reviews

Diff: net/url_request/url_request_file_job.cc

Issue 5607004: net: Remove typedef net::URLRequestJob URLRequestJob; (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: rebased 2 | Created 10 years ago
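For context, the change in this file follows the pattern named in the issue title: the global typedef for net::URLRequestJob goes away, so job subclasses must refer to the base class by its fully qualified name. The sketch below is a minimal, self-contained illustration of that pattern only; the classes in namespace net and the ExampleFileJob subclass are simplified stand-ins for this sketch, not Chromium's real API.

// Minimal sketch of the pattern applied by this CL. The classes in
// namespace net here are simplified stand-ins, not the real Chromium types.
namespace net {

class URLRequest {};  // stand-in for net::URLRequest

class URLRequestJob {
 public:
  explicit URLRequestJob(URLRequest* request) : request_(request) {}
  virtual ~URLRequestJob() {}
  virtual void Kill() { /* base-class bookkeeping */ }

 private:
  URLRequest* request_;
};

}  // namespace net

// Before this CL, a global "typedef net::URLRequestJob URLRequestJob;" let
// code outside namespace net write the unqualified name. With the typedef
// removed, subclasses spell out the namespace explicitly, exactly as the
// hunks below do for URLRequestFileJob.
class ExampleFileJob : public net::URLRequestJob {
 public:
  explicit ExampleFileJob(net::URLRequest* request)
      : net::URLRequestJob(request) {}  // fully qualified base initializer

  virtual void Kill() {
    // Job-specific cleanup would go here.
    net::URLRequestJob::Kill();  // fully qualified call to the base class
  }
};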
 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 // For loading files, we make use of overlapped i/o to ensure that reading from
 // the filesystem (e.g., a network filesystem) does not block the calling
 // thread. An alternative approach would be to use a background thread or pool
 // of threads, but it seems better to leverage the operating system's ability
 // to do background file reads for us.
 //
(...skipping 65 matching lines...)
   }
 
   URLRequestFileJob* owner_;
 
   Lock lock_;
   MessageLoop* owner_loop_;
 };
 #endif
 
 // static
-URLRequestJob* URLRequestFileJob::Factory(net::URLRequest* request,
+net::URLRequestJob* URLRequestFileJob::Factory(net::URLRequest* request,
                                                const std::string& scheme) {
 
   FilePath file_path;
   const bool is_file = net::FileURLToFilePath(request->url(), &file_path);
 
 #if defined(OS_CHROMEOS)
   // Check file access.
   if (AccessDisabled(file_path))
     return new URLRequestErrorJob(request, net::ERR_ACCESS_DENIED);
 #endif
 
   // We need to decide whether to create URLRequestFileJob for file access or
   // URLRequestFileDirJob for directory access. To avoid accessing the
   // filesystem, we only look at the path string here.
   // The code in the URLRequestFileJob::Start() method discovers that a path,
   // which doesn't end with a slash, should really be treated as a directory,
   // and it then redirects to the URLRequestFileDirJob.
   if (is_file &&
       file_util::EndsWithSeparator(file_path) &&
       file_path.IsAbsolute())
     return new URLRequestFileDirJob(request, file_path);
 
   // Use a regular file request job for all non-directories (including invalid
   // file names).
   return new URLRequestFileJob(request, file_path);
 }
 
 URLRequestFileJob::URLRequestFileJob(net::URLRequest* request,
                                      const FilePath& file_path)
-    : URLRequestJob(request),
+    : net::URLRequestJob(request),
       file_path_(file_path),
       ALLOW_THIS_IN_INITIALIZER_LIST(
           io_callback_(this, &URLRequestFileJob::DidRead)),
       is_directory_(false),
       remaining_bytes_(0),
       ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) {
 }
 
 URLRequestFileJob::~URLRequestFileJob() {
 #if defined(OS_WIN)
(...skipping 32 matching lines...)
 void URLRequestFileJob::Kill() {
   stream_.Close();
 
 #if defined(OS_WIN)
   if (async_resolver_) {
     async_resolver_->Cancel();
     async_resolver_ = NULL;
   }
 #endif
 
-  URLRequestJob::Kill();
+  net::URLRequestJob::Kill();
   method_factory_.RevokeAll();
 }
 
 bool URLRequestFileJob::ReadRawData(net::IOBuffer* dest, int dest_size,
                                     int *bytes_read) {
   DCHECK_NE(dest_size, 0);
   DCHECK(bytes_read);
   DCHECK_GE(remaining_bytes_, 0);
 
   if (remaining_bytes_ < dest_size)
(...skipping 199 matching lines...)
     const FilePath white_listed_path(kLocalAccessWhiteList[i]);
     // FilePath::operator== should probably handle trailing seperators.
     if (white_listed_path == file_path.StripTrailingSeparators() ||
         white_listed_path.IsParent(file_path)) {
       return false;
     }
   }
   return true;
 }
 #endif