Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(107)

Side by Side Diff: storage/browser/blob/blob_reader.cc

Issue 1337153002: [Blob] BlobReader class & tests, and removal of all redundant reading. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: asan fix Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "storage/browser/blob/blob_reader.h"
6
7 #include <algorithm>
8 #include <limits>
9
10 #include "base/bind.h"
11 #include "base/sequenced_task_runner.h"
12 #include "base/stl_util.h"
13 #include "base/time/time.h"
14 #include "base/trace_event/trace_event.h"
15 #include "net/base/io_buffer.h"
16 #include "net/base/net_errors.h"
17 #include "net/disk_cache/disk_cache.h"
18 #include "storage/browser/blob/blob_data_handle.h"
19 #include "storage/browser/blob/blob_data_snapshot.h"
20 #include "storage/browser/fileapi/file_stream_reader.h"
21 #include "storage/browser/fileapi/file_system_context.h"
22 #include "storage/browser/fileapi/file_system_url.h"
23 #include "storage/common/data_element.h"
24
25 namespace storage {
26 namespace {
27 bool IsFileType(DataElement::Type type) {
28 switch (type) {
29 case DataElement::TYPE_FILE:
30 case DataElement::TYPE_FILE_FILESYSTEM:
31 return true;
32 default:
33 return false;
34 }
35 }
36 } // namespace
37
// Out-of-line destructor for the provider interface (keeps the vtable here).
BlobReader::FileStreamReaderProvider::~FileStreamReaderProvider() {}
39
// Constructs a reader over |blob_handle|'s data. A snapshot of the blob is
// taken immediately, so later changes to the blob do not affect this reader.
// |blob_handle| may be null; CalculateSize()/SetReadRange()/Read() then
// report net::ERR_FILE_NOT_FOUND.
BlobReader::BlobReader(
    const BlobDataHandle* blob_handle,
    scoped_ptr<FileStreamReaderProvider> file_stream_provider,
    base::SequencedTaskRunner* file_task_runner)
    : file_stream_provider_(file_stream_provider.Pass()),
      file_task_runner_(file_task_runner),
      net_error_(net::OK),
      item_list_populated_(false),
      total_size_calculated_(false),
      total_size_(0),
      remaining_bytes_(0),
      pending_get_file_info_count_(0),
      current_item_index_(0),
      current_item_offset_(0),
      io_pending_(false),
      weak_factory_(this) {
  if (blob_handle) {
    blob_data_ = blob_handle->CreateSnapshot().Pass();
  }
}
60
61 BlobReader::~BlobReader() {
62 STLDeleteValues(&index_to_reader_);
63 }
64
// Computes the blob's total size by summing the lengths of all items. Byte
// items carry a known length; each file item requires a (possibly
// asynchronous) FileStreamReader::GetLength() call. Returns DONE if the size
// was computed synchronously, IO_PENDING if |done| will be run later, or
// NET_ERROR on failure. May only be called once.
BlobReader::Status BlobReader::CalculateSize(net::CompletionCallback done) {
  DCHECK(!total_size_calculated_);
  DCHECK(size_callback_.is_null());
  // A null snapshot means the handle was null at construction.
  if (!blob_data_.get()) {
    return ReportError(net::ERR_FILE_NOT_FOUND);
  }

  net_error_ = net::OK;
  total_size_ = 0;
  const auto& items = blob_data_->items();
  item_length_list_.resize(items.size());
  pending_get_file_info_count_ = 0;
  for (size_t i = 0; i < items.size(); ++i) {
    const BlobDataItem& item = *items.at(i);
    if (IsFileType(item.type())) {
      // Assume the length request goes async; decremented below if GetLength
      // actually answered synchronously.
      ++pending_get_file_info_count_;
      storage::FileStreamReader* const reader = GetOrCreateFileReaderAtIndex(i);
      if (!reader) {
        return ReportError(net::ERR_FAILED);
      }
      int64_t length_output = reader->GetLength(base::Bind(
          &BlobReader::DidGetFileItemLength, weak_factory_.GetWeakPtr(), i));
      if (length_output == net::ERR_IO_PENDING) {
        continue;
      }
      if (length_output < 0) {
        return ReportError(length_output);
      }
      // We got the length right away
      --pending_get_file_info_count_;
      uint64_t resolved_length;
      if (!ResolveFileItemLength(item, length_output, &resolved_length)) {
        return ReportError(net::ERR_FILE_NOT_FOUND);
      }
      if (!AddItemLength(i, resolved_length)) {
        return ReportError(net::ERR_FAILED);
      }
      continue;
    }

    if (!AddItemLength(i, item.length()))
      return ReportError(net::ERR_FAILED);
  }

  // All lengths resolved synchronously: finish now without storing |done|.
  if (pending_get_file_info_count_ == 0) {
    DidCountSize();
    return Status::DONE;
  }
  // Note: We only set the callback if we know that we're an async operation.
  size_callback_ = done;
  return Status::IO_PENDING;
}
117
118 BlobReader::Status BlobReader::SetReadRange(uint64_t offset, uint64_t length) {
119 if (!blob_data_.get()) {
120 return ReportError(net::ERR_FILE_NOT_FOUND);
121 }
122 if (!total_size_calculated_) {
123 return ReportError(net::ERR_FAILED);
124 }
125 if (offset + length > total_size_) {
126 return ReportError(net::ERR_FILE_NOT_FOUND);
127 }
128 // Skip the initial items that are not in the range.
129 remaining_bytes_ = length;
130 const auto& items = blob_data_->items();
131 for (current_item_index_ = 0;
132 current_item_index_ < items.size() &&
133 offset >= item_length_list_[current_item_index_];
134 ++current_item_index_) {
135 offset -= item_length_list_[current_item_index_];
136 }
137
138 // Set the offset that need to jump to for the first item in the range.
139 current_item_offset_ = offset;
140 if (current_item_offset_ == 0)
141 return Status::DONE;
142
143 // Adjust the offset of the first stream if it is of file type.
144 const BlobDataItem& item = *items.at(current_item_index_);
145 if (IsFileType(item.type())) {
146 SetFileReaderAtIndex(current_item_index_,
147 CreateFileStreamReader(item, offset));
148 }
149 return Status::DONE;
150 }
151
// Reads up to |dest_size| bytes of blob data into |buffer|. On Status::DONE,
// |*bytes_read| holds the number of bytes copied; on Status::IO_PENDING,
// |done| is invoked later with the byte count or a net error.
// CalculateSize() must have completed successfully beforehand.
BlobReader::Status BlobReader::Read(net::IOBuffer* buffer,
                                    size_t dest_size,
                                    int* bytes_read,
                                    net::CompletionCallback done) {
  if (!blob_data_.get()) {
    return ReportError(net::ERR_FILE_NOT_FOUND);
  }
  DCHECK(bytes_read);
  DCHECK_GE(remaining_bytes_, 0ul);
  DCHECK(read_callback_.is_null());

  // Reading before the size is known is a caller error.
  if (!total_size_calculated_) {
    net_error_ = net::ERR_FAILED;
  }

  // Bail out immediately if we encountered an error.
  if (net_error_ != net::OK) {
    *bytes_read = 0;
    return Status::NET_ERROR;
  }

  // Clamp the request to the bytes left in the (possibly ranged) blob.
  DCHECK_GE(dest_size, 0ul);
  if (remaining_bytes_ < static_cast<uint64_t>(dest_size))
    dest_size = static_cast<int>(remaining_bytes_);

  // If we should copy zero bytes because |remaining_bytes_| is zero, short
  // circuit here.
  if (!dest_size) {
    *bytes_read = 0;
    return Status::DONE;
  }

  // Keep track of the buffer.
  DCHECK(!read_buf_.get());
  read_buf_ = new net::DrainableIOBuffer(buffer, dest_size);

  Status status = ReadLoop(bytes_read);
  // Only stash the callback when the operation actually went asynchronous.
  if (status == Status::IO_PENDING)
    read_callback_ = done;
  return status;
}
193
// Aborts any in-flight work: drops the current item's file reader and
// invalidates weak pointers so pending completion callbacks never fire.
void BlobReader::Kill() {
  DeleteCurrentFileReader();
  weak_factory_.InvalidateWeakPtrs();
}
198
199 bool BlobReader::IsInMemory() const {
200 if (!blob_data_.get()) {
201 return true;
202 }
203 for (const auto& item : blob_data_->items()) {
204 if (item->type() != DataElement::TYPE_BYTES) {
205 return false;
206 }
207 }
208 return true;
209 }
210
// Records |net_error|, cancels all pending callbacks and state, then runs
// |done| with the error. |done| is taken by value deliberately: it is often
// one of the member callbacks that gets Reset() just below.
void BlobReader::InvalidateCallbacksAndDone(int net_error,
                                            net::CompletionCallback done) {
  net_error_ = net_error;
  weak_factory_.InvalidateWeakPtrs();
  size_callback_.Reset();
  read_callback_.Reset();
  read_buf_ = nullptr;
  done.Run(net_error);
}
220
// Stores |net_error| so subsequent calls fail fast, and returns NET_ERROR
// for convenient use in return statements.
BlobReader::Status BlobReader::ReportError(int net_error) {
  net_error_ = net_error;
  return Status::NET_ERROR;
}
225
226 bool BlobReader::AddItemLength(size_t index, uint64_t item_length) {
227 if (item_length > std::numeric_limits<uint64_t>::max() - total_size_) {
228 return false;
229 }
230
231 // Cache the size and add it to the total size.
232 DCHECK_LT(index, item_length_list_.size());
233 item_length_list_[index] = item_length;
234 total_size_ += item_length;
235 return true;
236 }
237
238 bool BlobReader::ResolveFileItemLength(const BlobDataItem& item,
239 int64_t total_length,
240 uint64_t* output_length) {
241 DCHECK(IsFileType(item.type()));
242 DCHECK(output_length);
243 uint64_t file_length = total_length;
244 uint64_t item_offset = item.offset();
245 uint64_t item_length = item.length();
246 if (item_offset > file_length) {
247 return false;
248 }
249
250 uint64 max_length = file_length - item_offset;
251
252 // If item length is undefined, then we need to use the file size being
253 // resolved in the real time.
254 if (item_length == std::numeric_limits<uint64>::max()) {
255 item_length = max_length;
256 } else if (item_length > max_length) {
257 return false;
258 }
259
260 *output_length = item_length;
261 return true;
262 }
263
// Completion callback for the FileStreamReader::GetLength() call issued from
// CalculateSize() for the file item at |index|. |result| is either the
// file's length or a net error code.
void BlobReader::DidGetFileItemLength(size_t index, int64_t result) {
  // Do nothing if we have encountered an error.
  if (net_error_)
    return;

  // A file that changed underneath us is surfaced as "not found".
  if (result == net::ERR_UPLOAD_FILE_CHANGED)
    result = net::ERR_FILE_NOT_FOUND;
  if (result < 0) {
    InvalidateCallbacksAndDone(result, size_callback_);
    return;
  }

  const auto& items = blob_data_->items();
  DCHECK_LT(index, items.size());
  const BlobDataItem& item = *items.at(index);
  uint64_t length;
  if (!ResolveFileItemLength(item, result, &length)) {
    InvalidateCallbacksAndDone(net::ERR_FILE_NOT_FOUND, size_callback_);
    return;
  }
  if (!AddItemLength(index, length)) {
    InvalidateCallbacksAndDone(net::ERR_FAILED, size_callback_);
    return;
  }

  // Once the last outstanding GetLength() resolves, finish the calculation.
  if (--pending_get_file_info_count_ == 0)
    DidCountSize();
}
292
293 void BlobReader::DidCountSize() {
294 DCHECK(!net_error_);
295 total_size_calculated_ = true;
296 remaining_bytes_ = total_size_;
297 // This is set only if we're async.
298 if (!size_callback_.is_null()) {
299 net::CompletionCallback done = size_callback_;
300 size_callback_.Reset();
301 done.Run(net::OK);
302 }
303 }
304
305 BlobReader::Status BlobReader::ReadLoop(int* bytes_read) {
306 // Read until we encounter an error or could not get the data immediately.
307 while (remaining_bytes_ > 0 && read_buf_->BytesRemaining() > 0) {
308 Status read_status = ReadItem();
309 if (read_status == Status::DONE) {
310 continue;
311 }
312 return read_status;
313 }
314
315 *bytes_read = BytesReadCompleted();
316 return Status::DONE;
317 }
318
// Reads (or starts reading) as much as possible of the current item into
// |read_buf_|. Returns DONE when synchronous progress was made (callers loop
// again), IO_PENDING when an async read was started, or NET_ERROR.
BlobReader::Status BlobReader::ReadItem() {
  // Are we done with reading all the blob data?
  if (remaining_bytes_ == 0)
    return Status::DONE;

  const auto& items = blob_data_->items();
  // If we get to the last item but still expect something to read, bail out
  // since something is wrong.
  if (current_item_index_ >= items.size()) {
    return ReportError(net::ERR_FAILED);
  }

  // Compute the bytes to read for current item.
  int bytes_to_read = ComputeBytesToRead();

  // If nothing to read for current item, advance to next item.
  if (bytes_to_read == 0) {
    AdvanceItem();
    return Status::DONE;
  }

  // Do the reading.
  const BlobDataItem& item = *items.at(current_item_index_);
  if (item.type() == DataElement::TYPE_BYTES) {
    // In-memory bytes never block.
    ReadBytesItem(item, bytes_to_read);
    return Status::DONE;
  }
  if (item.type() == DataElement::TYPE_DISK_CACHE_ENTRY)
    return ReadDiskCacheEntryItem(item, bytes_to_read);
  if (!IsFileType(item.type())) {
    NOTREACHED();
    return ReportError(net::ERR_FAILED);
  }
  storage::FileStreamReader* const reader =
      GetOrCreateFileReaderAtIndex(current_item_index_);
  if (!reader) {
    return ReportError(net::ERR_FAILED);
  }

  return ReadFileItem(reader, bytes_to_read);
}
360
// Moves bookkeeping to the start of the next item, releasing the current
// item's file reader (if it has one).
void BlobReader::AdvanceItem() {
  // Close the file if the current item is a file.
  DeleteCurrentFileReader();

  // Advance to the next item.
  current_item_index_++;
  current_item_offset_ = 0;
}
369
370 void BlobReader::AdvanceBytesRead(int result) {
371 DCHECK_GT(result, 0);
372
373 // Do we finish reading the current item?
374 current_item_offset_ += result;
375 if (current_item_offset_ == item_length_list_[current_item_index_])
376 AdvanceItem();
377
378 // Subtract the remaining bytes.
379 remaining_bytes_ -= result;
380 DCHECK_GE(remaining_bytes_, 0ul);
381
382 // Adjust the read buffer.
383 read_buf_->DidConsume(result);
384 DCHECK_GE(read_buf_->BytesRemaining(), 0);
385 }
386
// Copies |bytes_to_read| bytes of an in-memory item into |read_buf_|,
// honoring both the item's base offset and our progress within the item.
void BlobReader::ReadBytesItem(const BlobDataItem& item, int bytes_to_read) {
  TRACE_EVENT1("Blob", "BlobReader::ReadBytesItem", "uuid", blob_data_->uuid());
  DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);

  memcpy(read_buf_->data(), item.bytes() + item.offset() + current_item_offset_,
         bytes_to_read);

  AdvanceBytesRead(bytes_to_read);
}
396
// Starts reading |bytes_to_read| bytes of the current file item through
// |reader|. Returns DONE if the read completed synchronously, IO_PENDING if
// DidReadFile() will run later, or NET_ERROR on immediate failure.
BlobReader::Status BlobReader::ReadFileItem(FileStreamReader* reader,
                                            int bytes_to_read) {
  DCHECK(!io_pending_)
      << "Can't begin IO while another IO operation is pending.";
  DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
  DCHECK(reader);
  TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
                           blob_data_->uuid());
  const int result = reader->Read(
      read_buf_.get(), bytes_to_read,
      base::Bind(&BlobReader::DidReadFile, weak_factory_.GetWeakPtr()));
  // Non-negative: the read finished synchronously. NOTE(review): a
  // synchronous zero-byte result would trip AdvanceBytesRead's
  // DCHECK_GT(result, 0) — presumed unreachable; confirm with the reader
  // implementations.
  if (result >= 0) {
    AdvanceBytesRead(result);
    return Status::DONE;
  }
  if (result == net::ERR_IO_PENDING) {
    io_pending_ = true;
    return Status::IO_PENDING;
  }
  return ReportError(result);
}
418
419 void BlobReader::DidReadFile(int result) {
420 DCHECK(io_pending_) << "Asynchronous IO completed while IO wasn't pending?";
421 TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
michaeln 2015/09/23 18:52:46 I think the only difference between DidReadFile()
dmurph 2015/09/25 17:11:44 Done!
422 blob_data_->uuid());
423 io_pending_ = false;
424 if (result <= 0) {
425 InvalidateCallbacksAndDone(result, read_callback_);
426 return;
427 }
428
429 AdvanceBytesRead(result);
430
431 // Otherwise, continue the reading.
432 ContinueAsyncReadLoop();
433 }
434
// Resumes the synchronous read loop after an async completion, delivering
// the byte count (or error) to the stored |read_callback_| when the loop
// finishes or fails.
void BlobReader::ContinueAsyncReadLoop() {
  int bytes_read = 0;
  Status read_status = ReadLoop(&bytes_read);
  switch (read_status) {
    case Status::DONE: {
      // Copy and clear the stored callback before running it.
      net::CompletionCallback done = read_callback_;
      read_callback_.Reset();
      done.Run(bytes_read);
      return;
    }
    case Status::NET_ERROR:
      InvalidateCallbacksAndDone(net_error_, read_callback_);
      return;
    case Status::IO_PENDING:
      // Another async completion will drive the next iteration.
      return;
  }
}
452
// Deletes and unregisters the cached file reader (if any) for the current
// item by storing a null scoped_ptr in its slot.
void BlobReader::DeleteCurrentFileReader() {
  SetFileReaderAtIndex(current_item_index_, scoped_ptr<FileStreamReader>());
}
456
// Starts reading |bytes_to_read| bytes from the item's disk-cache entry.
// Returns DONE if the read completed synchronously, IO_PENDING if
// DidReadDiskCacheEntry() will run later, or NET_ERROR on failure.
BlobReader::Status BlobReader::ReadDiskCacheEntryItem(const BlobDataItem& item,
                                                      int bytes_to_read) {
  DCHECK(!io_pending_)
      << "Can't begin IO while another IO operation is pending.";
  TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadDiskCacheItem", this,
                           "uuid", blob_data_->uuid());
  DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);

  const int result = item.disk_cache_entry()->ReadData(
      item.disk_cache_stream_index(), current_item_offset_, read_buf_.get(),
      bytes_to_read, base::Bind(&BlobReader::DidReadDiskCacheEntry,
                                weak_factory_.GetWeakPtr()));
  // Non-negative: the read finished synchronously.
  if (result >= 0) {
    AdvanceBytesRead(result);
    return Status::DONE;
  }
  if (result == net::ERR_IO_PENDING) {
    io_pending_ = true;
    return Status::IO_PENDING;
  }
  return ReportError(result);
}
479
// Completion callback for the asynchronous disk-cache read started in
// ReadDiskCacheEntryItem(). |result| is a positive byte count, zero, or a
// net error code.
void BlobReader::DidReadDiskCacheEntry(int result) {
  DCHECK(io_pending_) << "Asynchronous IO completed while IO wasn't pending?";
  TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadDiskCacheItem", this, "uuid",
                         blob_data_->uuid());
  io_pending_ = false;
  // Zero or negative means failure; surface it and cancel pending state.
  if (result <= 0) {
    InvalidateCallbacksAndDone(result, read_callback_);
    return;
  }

  AdvanceBytesRead(result);

  ContinueAsyncReadLoop();
}
494
// Returns the number of bytes consumed from |read_buf_| and releases the
// buffer, ending the current Read() operation.
int BlobReader::BytesReadCompleted() {
  int bytes_read = read_buf_->BytesConsumed();
  read_buf_ = nullptr;
  return bytes_read;
}
500
501 int BlobReader::ComputeBytesToRead() const {
502 uint64_t current_item_length = item_length_list_[current_item_index_];
503
504 uint64_t item_remaining = current_item_length - current_item_offset_;
505 uint64_t buf_remaining = read_buf_->BytesRemaining();
506 uint64_t max_int_value = std::numeric_limits<int>::max();
507 // Here we make sure we don't overflow 'max int'.
508 uint64_t min = std::min(
509 std::min(std::min(item_remaining, buf_remaining), remaining_bytes_),
510 max_int_value);
511
512 return static_cast<int>(min);
513 }
514
// Returns the cached FileStreamReader for the file item at |index|, creating
// and caching one (at additional offset 0) on first use. Returns null for
// non-file items or when creation fails.
FileStreamReader* BlobReader::GetOrCreateFileReaderAtIndex(size_t index) {
  const auto& items = blob_data_->items();
  DCHECK_LT(index, items.size());
  const BlobDataItem& item = *items.at(index);
  if (!IsFileType(item.type()))
    return nullptr;
  auto it = index_to_reader_.find(index);
  if (it != index_to_reader_.end()) {
    DCHECK(it->second);
    return it->second;
  }
  scoped_ptr<FileStreamReader> reader = CreateFileStreamReader(item, 0);
  FileStreamReader* ret_value = reader.get();
  if (!ret_value)
    return nullptr;
  // The map takes ownership of the raw pointer (released in ~BlobReader).
  index_to_reader_[index] = reader.release();
  return ret_value;
}
533
// Creates a new FileStreamReader for the file-backed |item|, positioned
// |additional_offset| bytes past the item's own base offset. Returns null
// for unsupported item types (which would be a programming error, see the
// DCHECK and NOTREACHED below).
scoped_ptr<FileStreamReader> BlobReader::CreateFileStreamReader(
    const BlobDataItem& item,
    uint64_t additional_offset) {
  DCHECK(IsFileType(item.type()));

  switch (item.type()) {
    case DataElement::TYPE_FILE:
      return file_stream_provider_->CreateForLocalFile(
                 file_task_runner_.get(), item.path(),
                 item.offset() + additional_offset,
                 item.expected_modification_time())
          .Pass();
    case DataElement::TYPE_FILE_FILESYSTEM:
      // A max-uint64 item length means "unspecified": read to EOF.
      return file_stream_provider_
          ->CreateFileStreamReader(
              item.filesystem_url(), item.offset() + additional_offset,
              item.length() == std::numeric_limits<uint64_t>::max()
                  ? storage::kMaximumLength
                  : item.length() - additional_offset,
              item.expected_modification_time())
          .Pass();
    case DataElement::TYPE_BLOB:
    case DataElement::TYPE_BYTES:
    case DataElement::TYPE_DISK_CACHE_ENTRY:
    case DataElement::TYPE_UNKNOWN:
      break;
  }

  NOTREACHED();
  return nullptr;
}
565
566 void BlobReader::SetFileReaderAtIndex(size_t index,
567 scoped_ptr<FileStreamReader> reader) {
568 auto found = index_to_reader_.find(current_item_index_);
569 if (found != index_to_reader_.end()) {
570 if (found->second) {
571 delete found->second;
572 }
573 if (!reader.get()) {
574 index_to_reader_.erase(found);
575 return;
576 }
577 found->second = reader.release();
578 } else if (reader.get()) {
579 index_to_reader_[current_item_index_] = reader.release();
580 }
581 }
582
583 } // namespace storage
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698