Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(375)

Side by Side Diff: net/url_request/url_request_file_job.cc

Issue 227943003: Add experiment to measure time to hash extension content as we read it (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: responded to review comments Created 6 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 // For loading files, we make use of overlapped i/o to ensure that reading from 5 // For loading files, we make use of overlapped i/o to ensure that reading from
6 // the filesystem (e.g., a network filesystem) does not block the calling 6 // the filesystem (e.g., a network filesystem) does not block the calling
7 // thread. An alternative approach would be to use a background thread or pool 7 // thread. An alternative approach would be to use a background thread or pool
8 // of threads, but it seems better to leverage the operating system's ability 8 // of threads, but it seems better to leverage the operating system's ability
9 // to do background file reads for us. 9 // to do background file reads for us.
10 // 10 //
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after
76 base::Owned(meta_info))); 76 base::Owned(meta_info)));
77 } 77 }
78 78
// Cancels the job. The teardown order matters: dropping |stream_| first
// stops further file I/O, and invalidating weak pointers discards any
// already-posted completion callbacks (e.g. DidRead) before the base
// class finishes killing the job.
void URLRequestFileJob::Kill() {
  stream_.reset();
  weak_ptr_factory_.InvalidateWeakPtrs();

  URLRequestJob::Kill();
}
85 85
86 bool URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size, 86 bool URLRequestFileJob::ReadRawData(IOBuffer* dest,
87 int *bytes_read) { 87 int dest_size,
88 int* bytes_read) {
88 DCHECK_NE(dest_size, 0); 89 DCHECK_NE(dest_size, 0);
89 DCHECK(bytes_read); 90 DCHECK(bytes_read);
90 DCHECK_GE(remaining_bytes_, 0); 91 DCHECK_GE(remaining_bytes_, 0);
91 92
92 if (remaining_bytes_ < dest_size) 93 if (remaining_bytes_ < dest_size)
93 dest_size = static_cast<int>(remaining_bytes_); 94 dest_size = static_cast<int>(remaining_bytes_);
94 95
95 // If we should copy zero bytes because |remaining_bytes_| is zero, short 96 // If we should copy zero bytes because |remaining_bytes_| is zero, short
96 // circuit here. 97 // circuit here.
97 if (!dest_size) { 98 if (!dest_size) {
98 *bytes_read = 0; 99 *bytes_read = 0;
99 return true; 100 return true;
100 } 101 }
101 102
102 int rv = stream_->Read(dest, dest_size, 103 int rv = stream_->Read(dest,
104 dest_size,
103 base::Bind(&URLRequestFileJob::DidRead, 105 base::Bind(&URLRequestFileJob::DidRead,
104 weak_ptr_factory_.GetWeakPtr())); 106 weak_ptr_factory_.GetWeakPtr(),
107 make_scoped_refptr(dest)));
105 if (rv >= 0) { 108 if (rv >= 0) {
106 // Data is immediately available. 109 // Data is immediately available.
107 *bytes_read = rv; 110 *bytes_read = rv;
108 remaining_bytes_ -= rv; 111 remaining_bytes_ -= rv;
109 DCHECK_GE(remaining_bytes_, 0); 112 DCHECK_GE(remaining_bytes_, 0);
110 return true; 113 return true;
111 } 114 }
112 115
113 // Otherwise, a read error occurred. We may just need to wait... 116 // Otherwise, a read error occurred. We may just need to wait...
114 if (rv == ERR_IO_PENDING) { 117 if (rv == ERR_IO_PENDING) {
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
185 // because we need to do multipart encoding here. 188 // because we need to do multipart encoding here.
186 // TODO(hclam): decide whether we want to support multiple range 189 // TODO(hclam): decide whether we want to support multiple range
187 // requests. 190 // requests.
188 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, 191 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
189 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); 192 ERR_REQUEST_RANGE_NOT_SATISFIABLE));
190 } 193 }
191 } 194 }
192 } 195 }
193 } 196 }
194 197
// Notification hook called by DidSeek() with the seek result (the resulting
// file offset on success, a net error code on failure). Empty by default.
// NOTE(review): presumably intended for subclasses to override — confirm
// against the virtual declaration in the header.
void URLRequestFileJob::OnSeekComplete(int64 result) {
}
200
// Notification hook called by DidRead() with the buffer that was filled and
// the read result (bytes read, or a net error code). Empty by default.
// NOTE(review): presumably intended for subclasses to override (e.g. to hash
// content as it is read) — confirm against the header.
void URLRequestFileJob::OnReadComplete(net::IOBuffer* buf, int result) {
}
203
// Out-of-line empty destructor; members (including |stream_| and
// |weak_ptr_factory_|) clean up via their own destructors.
URLRequestFileJob::~URLRequestFileJob() {
}
197 206
198 void URLRequestFileJob::FetchMetaInfo(const base::FilePath& file_path, 207 void URLRequestFileJob::FetchMetaInfo(const base::FilePath& file_path,
199 FileMetaInfo* meta_info) { 208 FileMetaInfo* meta_info) {
200 base::File::Info file_info; 209 base::File::Info file_info;
201 meta_info->file_exists = base::GetFileInfo(file_path, &file_info); 210 meta_info->file_exists = base::GetFileInfo(file_path, &file_info);
202 if (meta_info->file_exists) { 211 if (meta_info->file_exists) {
203 meta_info->file_size = file_info.size; 212 meta_info->file_size = file_info.size;
204 meta_info->is_directory = file_info.is_directory; 213 meta_info->is_directory = file_info.is_directory;
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after
266 } 275 }
267 } else { 276 } else {
268 // We didn't need to call stream_->Seek() at all, so we pass to DidSeek() 277 // We didn't need to call stream_->Seek() at all, so we pass to DidSeek()
269 // the value that would mean seek success. This way we skip the code 278 // the value that would mean seek success. This way we skip the code
270 // handling seek failure. 279 // handling seek failure.
271 DidSeek(byte_range_.first_byte_position()); 280 DidSeek(byte_range_.first_byte_position());
272 } 281 }
273 } 282 }
274 283
275 void URLRequestFileJob::DidSeek(int64 result) { 284 void URLRequestFileJob::DidSeek(int64 result) {
285 OnSeekComplete(result);
276 if (result != byte_range_.first_byte_position()) { 286 if (result != byte_range_.first_byte_position()) {
277 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, 287 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
278 ERR_REQUEST_RANGE_NOT_SATISFIABLE)); 288 ERR_REQUEST_RANGE_NOT_SATISFIABLE));
279 return; 289 return;
280 } 290 }
281 291
282 set_expected_content_size(remaining_bytes_); 292 set_expected_content_size(remaining_bytes_);
283 NotifyHeadersComplete(); 293 NotifyHeadersComplete();
284 } 294 }
285 295
286 void URLRequestFileJob::DidRead(int result) { 296 void URLRequestFileJob::DidRead(scoped_refptr<net::IOBuffer> buf, int result) {
297 OnReadComplete(buf.get(), result);
298 buf = NULL;
287 if (result > 0) { 299 if (result > 0) {
288 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status 300 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status
289 } else if (result == 0) { 301 } else if (result == 0) {
290 NotifyDone(URLRequestStatus()); 302 NotifyDone(URLRequestStatus());
291 } else { 303 } else {
292 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result)); 304 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
293 } 305 }
294 306
295 remaining_bytes_ -= result; 307 remaining_bytes_ -= result;
296 DCHECK_GE(remaining_bytes_, 0); 308 DCHECK_GE(remaining_bytes_, 0);
297 309
298 NotifyReadComplete(result); 310 NotifyReadComplete(result);
299 } 311 }
300 312
301 } // namespace net 313 } // namespace net
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698