Chromium Code Reviews

Side by Side Diff: storage/browser/blob/blob_reader.cc

Issue 1337153002: [Blob] BlobReader class & tests, and removal of all redundant reading. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: comments Created 5 years, 2 months ago
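For orientation, here is a hedged sketch of how a caller might drive the new BlobReader through its CalculateSize and Read phases (an optional SetReadRange() call is omitted). It only uses the signatures and Status values visible in the diff below plus an assumed net_error() accessor; it is illustrative, not code from this patch.

#include "net/base/completion_callback.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "storage/browser/blob/blob_reader.h"

// Illustrative caller only (not part of the patch). |done| is the completion
// callback run for any step that goes asynchronous.
void StartReadingBlob(storage::BlobReader* reader,
                      const scoped_refptr<net::IOBuffer>& buf,
                      size_t buf_size,
                      const net::CompletionCallback& done) {
  switch (reader->CalculateSize(done)) {
    case storage::BlobReader::Status::NET_ERROR:
      done.Run(reader->net_error());  // net_error() accessor assumed.
      return;
    case storage::BlobReader::Status::IO_PENDING:
      return;  // |done| runs with net::OK once the size is known; the caller
               // would then issue Read() from that callback.
    case storage::BlobReader::Status::DONE:
      break;  // Size known synchronously; read immediately.
  }
  int bytes_read = 0;
  switch (reader->Read(buf.get(), buf_size, &bytes_read, done)) {
    case storage::BlobReader::Status::NET_ERROR:
      done.Run(reader->net_error());
      return;
    case storage::BlobReader::Status::IO_PENDING:
      return;  // |done| runs with the byte count when the read completes.
    case storage::BlobReader::Status::DONE:
      done.Run(bytes_read);
      return;
  }
}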
1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "storage/browser/blob/blob_reader.h"
6
7 #include <algorithm>
8 #include <limits>
9
10 #include "base/bind.h"
11 #include "base/sequenced_task_runner.h"
12 #include "base/stl_util.h"
13 #include "base/time/time.h"
14 #include "base/trace_event/trace_event.h"
15 #include "net/base/io_buffer.h"
16 #include "net/base/net_errors.h"
17 #include "net/disk_cache/disk_cache.h"
18 #include "storage/browser/blob/blob_data_handle.h"
19 #include "storage/browser/blob/blob_data_snapshot.h"
20 #include "storage/browser/fileapi/file_stream_reader.h"
21 #include "storage/browser/fileapi/file_system_context.h"
22 #include "storage/browser/fileapi/file_system_url.h"
23 #include "storage/common/data_element.h"
24
25 namespace storage {
26 namespace {
27 bool IsFileType(DataElement::Type type) {
28 switch (type) {
29 case DataElement::TYPE_FILE:
30 case DataElement::TYPE_FILE_FILESYSTEM:
31 return true;
32 default:
33 return false;
34 }
35 }
36 } // namespace
37
38 BlobReader::FileStreamReaderProvider::~FileStreamReaderProvider() {}
39
40 BlobReader::BlobReader(
41 const BlobDataHandle* blob_handle,
42 scoped_ptr<FileStreamReaderProvider> file_stream_provider,
43 base::SequencedTaskRunner* file_task_runner)
44 : file_stream_provider_(file_stream_provider.Pass()),
45 file_task_runner_(file_task_runner),
46 net_error_(net::OK),
47 item_list_populated_(false),
48 total_size_calculated_(false),
49 total_size_(0),
50 remaining_bytes_(0),
51 pending_get_file_info_count_(0),
52 current_item_index_(0),
53 current_item_offset_(0),
54 io_pending_(false),
kinuko 2015/09/26 15:14:26 nit: it might be easier to initialize some of these
dmurph 2015/09/28 19:40:02 Done.
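(For reference, a minimal sketch of the kind of change the nit suggests: C++11 default member initializers in blob_reader.h replacing most of the constructor initializer list. The member types below are assumed from their usage in this file, since the header is not part of this diff.)

  // Hypothetical blob_reader.h excerpt -- types assumed from usage here;
  // with these initializers the long constructor initializer list shrinks
  // to just the members that take constructor arguments.
  int net_error_ = net::OK;
  bool item_list_populated_ = false;
  bool total_size_calculated_ = false;
  uint64_t total_size_ = 0;
  uint64_t remaining_bytes_ = 0;
  size_t pending_get_file_info_count_ = 0;
  size_t current_item_index_ = 0;
  uint64_t current_item_offset_ = 0;
  bool io_pending_ = false;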
55 weak_factory_(this) {
56 if (blob_handle) {
57 blob_data_ = blob_handle->CreateSnapshot().Pass();
58 }
59 }
60
61 BlobReader::~BlobReader() {
62 STLDeleteValues(&index_to_reader_);
63 }
64
65 BlobReader::Status BlobReader::CalculateSize(net::CompletionCallback done) {
66 DCHECK(!total_size_calculated_);
67 DCHECK(size_callback_.is_null());
68 if (!blob_data_.get()) {
69 return ReportError(net::ERR_FILE_NOT_FOUND);
70 }
71
72 net_error_ = net::OK;
73 total_size_ = 0;
74 const auto& items = blob_data_->items();
75 item_length_list_.resize(items.size());
76 pending_get_file_info_count_ = 0;
77 for (size_t i = 0; i < items.size(); ++i) {
78 const BlobDataItem& item = *items.at(i);
79 if (IsFileType(item.type())) {
80 ++pending_get_file_info_count_;
81 storage::FileStreamReader* const reader = GetOrCreateFileReaderAtIndex(i);
82 if (!reader) {
83 return ReportError(net::ERR_FAILED);
84 }
85 int64_t length_output = reader->GetLength(base::Bind(
86 &BlobReader::DidGetFileItemLength, weak_factory_.GetWeakPtr(), i));
87 if (length_output == net::ERR_IO_PENDING) {
88 continue;
89 }
90 if (length_output < 0) {
91 return ReportError(length_output);
92 }
93 // We got the length right away.
94 --pending_get_file_info_count_;
95 uint64_t resolved_length;
96 if (!ResolveFileItemLength(item, length_output, &resolved_length)) {
97 return ReportError(net::ERR_FILE_NOT_FOUND);
98 }
99 if (!AddItemLength(i, resolved_length)) {
100 return ReportError(net::ERR_FAILED);
101 }
102 continue;
103 }
104
105 if (!AddItemLength(i, item.length()))
106 return ReportError(net::ERR_FAILED);
107 }
108
109 if (pending_get_file_info_count_ == 0) {
110 DidCountSize();
111 return Status::DONE;
112 }
113 // Note: We only set the callback if we know the operation is asynchronous.
114 size_callback_ = done;
115 return Status::IO_PENDING;
116 }
117
118 BlobReader::Status BlobReader::SetReadRange(uint64_t offset, uint64_t length) {
119 if (!blob_data_.get()) {
120 return ReportError(net::ERR_FILE_NOT_FOUND);
121 }
122 if (!total_size_calculated_) {
123 return ReportError(net::ERR_FAILED);
124 }
125 if (offset + length > total_size_) {
126 return ReportError(net::ERR_FILE_NOT_FOUND);
kinuko 2015/09/26 15:14:26 Is this error type right??
dmurph 2015/09/28 19:40:02 This is the same behavior as before.
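(A short sketch of the behavior being preserved, written as a hypothetical test; the reader, blob_size, EXPECT macros, and a net_error() accessor are all assumed. A range extending past the blob is rejected with ERR_FILE_NOT_FOUND rather than a range-specific error, matching the pre-existing behavior.)

  // Hypothetical test sketch, run after CalculateSize() has completed: an
  // out-of-bounds range fails with ERR_FILE_NOT_FOUND, as before.
  EXPECT_EQ(BlobReader::Status::NET_ERROR,
            reader->SetReadRange(0, blob_size + 1));
  EXPECT_EQ(net::ERR_FILE_NOT_FOUND, reader->net_error());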
127 }
128 // Skip the initial items that are not in the range.
129 remaining_bytes_ = length;
130 const auto& items = blob_data_->items();
131 for (current_item_index_ = 0;
132 current_item_index_ < items.size() &&
133 offset >= item_length_list_[current_item_index_];
134 ++current_item_index_) {
135 offset -= item_length_list_[current_item_index_];
136 }
137
138 // Set the offset we need to jump to for the first item in the range.
139 current_item_offset_ = offset;
140 if (current_item_offset_ == 0)
141 return Status::DONE;
142
143 // Adjust the offset of the first stream if it is of file type.
144 const BlobDataItem& item = *items.at(current_item_index_);
145 if (IsFileType(item.type())) {
146 SetFileReaderAtIndex(current_item_index_,
147 CreateFileStreamReader(item, offset));
148 }
149 return Status::DONE;
150 }
151
152 BlobReader::Status BlobReader::Read(net::IOBuffer* buffer,
153 size_t dest_size,
154 int* bytes_read,
155 net::CompletionCallback done) {
156 if (!blob_data_.get()) {
157 return ReportError(net::ERR_FILE_NOT_FOUND);
kinuko 2015/09/26 15:14:26 Do we need to set *bytes_read = 0 in this case too?
dmurph 2015/09/28 19:40:02 Done.
158 }
159 DCHECK(bytes_read);
160 DCHECK_GE(remaining_bytes_, 0ul);
161 DCHECK(read_callback_.is_null());
162
163 if (!total_size_calculated_) {
164 net_error_ = net::ERR_FAILED;
165 }
166
167 // Bail out immediately if we encountered an error.
168 if (net_error_ != net::OK) {
169 *bytes_read = 0;
170 return Status::NET_ERROR;
171 }
kinuko 2015/09/26 15:14:26 It feels we can set *bytes_read = 0 at the beginning.
dmurph 2015/09/28 19:40:02 Sure, I had it be an error case for tests, but I can change it.
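(A sketch of the simplification discussed above, not the final patch: clear the output once at the top of Read() so every early return reports zero bytes.)

  // Hypothetical reordering of the top of BlobReader::Read():
  DCHECK(bytes_read);
  *bytes_read = 0;
  if (!blob_data_.get())
    return ReportError(net::ERR_FILE_NOT_FOUND);
  DCHECK(read_callback_.is_null());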
172
173 DCHECK_GE(dest_size, 0ul);
174 if (remaining_bytes_ < static_cast<uint64_t>(dest_size))
175 dest_size = static_cast<int>(remaining_bytes_);
176
177 // If we should copy zero bytes because |remaining_bytes_| is zero, short
178 // circuit here.
179 if (!dest_size) {
180 *bytes_read = 0;
181 return Status::DONE;
182 }
183
184 // Keep track of the buffer.
185 DCHECK(!read_buf_.get());
186 read_buf_ = new net::DrainableIOBuffer(buffer, dest_size);
187
188 Status status = ReadLoop(bytes_read);
189 if (status == Status::IO_PENDING)
190 read_callback_ = done;
191 return status;
192 }
193
194 void BlobReader::Kill() {
195 DeleteCurrentFileReader();
196 weak_factory_.InvalidateWeakPtrs();
197 }
198
199 bool BlobReader::IsInMemory() const {
200 if (!blob_data_.get()) {
201 return true;
202 }
203 for (const auto& item : blob_data_->items()) {
204 if (item->type() != DataElement::TYPE_BYTES) {
205 return false;
206 }
207 }
208 return true;
209 }
210
211 void BlobReader::InvalidateCallbacksAndDone(int net_error,
212 net::CompletionCallback done) {
213 net_error_ = net_error;
214 weak_factory_.InvalidateWeakPtrs();
215 size_callback_.Reset();
216 read_callback_.Reset();
217 read_buf_ = nullptr;
218 done.Run(net_error);
219 }
220
221 BlobReader::Status BlobReader::ReportError(int net_error) {
222 net_error_ = net_error;
223 return Status::NET_ERROR;
224 }
225
226 bool BlobReader::AddItemLength(size_t index, uint64_t item_length) {
227 if (item_length > std::numeric_limits<uint64_t>::max() - total_size_) {
228 return false;
229 }
230
231 // Cache the size and add it to the total size.
232 DCHECK_LT(index, item_length_list_.size());
233 item_length_list_[index] = item_length;
234 total_size_ += item_length;
235 return true;
236 }
237
238 bool BlobReader::ResolveFileItemLength(const BlobDataItem& item,
239 int64_t total_length,
240 uint64_t* output_length) {
241 DCHECK(IsFileType(item.type()));
242 DCHECK(output_length);
243 uint64_t file_length = total_length;
244 uint64_t item_offset = item.offset();
245 uint64_t item_length = item.length();
246 if (item_offset > file_length) {
247 return false;
248 }
249
250 uint64_t max_length = file_length - item_offset;
251
252 // If the item length is undefined, fall back to the file length that was
253 // just resolved.
254 if (item_length == std::numeric_limits<uint64_t>::max()) {
255 item_length = max_length;
256 } else if (item_length > max_length) {
257 return false;
258 }
259
260 *output_length = item_length;
261 return true;
262 }
263
264 void BlobReader::DidGetFileItemLength(size_t index, int64_t result) {
265 // Do nothing if we have encountered an error.
266 if (net_error_)
267 return;
268
269 if (result == net::ERR_UPLOAD_FILE_CHANGED)
270 result = net::ERR_FILE_NOT_FOUND;
271 if (result < 0) {
272 InvalidateCallbacksAndDone(result, size_callback_);
273 return;
274 }
275
276 const auto& items = blob_data_->items();
277 DCHECK_LT(index, items.size());
278 const BlobDataItem& item = *items.at(index);
279 uint64_t length;
280 if (!ResolveFileItemLength(item, result, &length)) {
281 InvalidateCallbacksAndDone(net::ERR_FILE_NOT_FOUND, size_callback_);
282 return;
283 }
284 if (!AddItemLength(index, length)) {
285 InvalidateCallbacksAndDone(net::ERR_FAILED, size_callback_);
286 return;
287 }
288
289 if (--pending_get_file_info_count_ == 0)
290 DidCountSize();
291 }
292
293 void BlobReader::DidCountSize() {
294 DCHECK(!net_error_);
295 total_size_calculated_ = true;
296 remaining_bytes_ = total_size_;
297 // This is set only if we're async.
298 if (!size_callback_.is_null()) {
299 net::CompletionCallback done = size_callback_;
300 size_callback_.Reset();
301 done.Run(net::OK);
302 }
303 }
304
305 BlobReader::Status BlobReader::ReadLoop(int* bytes_read) {
306 // Read until we encounter an error or cannot get the data immediately.
307 while (remaining_bytes_ > 0 && read_buf_->BytesRemaining() > 0) {
308 Status read_status = ReadItem();
309 if (read_status == Status::DONE) {
310 continue;
311 }
312 return read_status;
313 }
314
315 *bytes_read = BytesReadCompleted();
316 return Status::DONE;
317 }
318
319 BlobReader::Status BlobReader::ReadItem() {
320 // Are we done with reading all the blob data?
321 if (remaining_bytes_ == 0)
322 return Status::DONE;
323
324 const auto& items = blob_data_->items();
325 // If we have advanced past the last item but still expect something to
326 // read, something is wrong; bail out.
327 if (current_item_index_ >= items.size()) {
328 return ReportError(net::ERR_FAILED);
329 }
330
331 // Compute the bytes to read for current item.
332 int bytes_to_read = ComputeBytesToRead();
333
334 // If nothing to read for current item, advance to next item.
335 if (bytes_to_read == 0) {
336 AdvanceItem();
337 return Status::DONE;
338 }
339
340 // Do the reading.
341 const BlobDataItem& item = *items.at(current_item_index_);
342 if (item.type() == DataElement::TYPE_BYTES) {
343 ReadBytesItem(item, bytes_to_read);
344 return Status::DONE;
345 }
346 if (item.type() == DataElement::TYPE_DISK_CACHE_ENTRY)
347 return ReadDiskCacheEntryItem(item, bytes_to_read);
348 if (!IsFileType(item.type())) {
349 NOTREACHED();
350 return ReportError(net::ERR_FAILED);
351 }
352 storage::FileStreamReader* const reader =
353 GetOrCreateFileReaderAtIndex(current_item_index_);
354 if (!reader) {
355 return ReportError(net::ERR_FAILED);
356 }
357
358 return ReadFileItem(reader, bytes_to_read);
359 }
360
361 void BlobReader::AdvanceItem() {
362 // Close the file if the current item is a file.
363 DeleteCurrentFileReader();
364
365 // Advance to the next item.
366 current_item_index_++;
367 current_item_offset_ = 0;
368 }
369
370 void BlobReader::AdvanceBytesRead(int result) {
371 DCHECK_GT(result, 0);
372
373 // Have we finished reading the current item?
374 current_item_offset_ += result;
375 if (current_item_offset_ == item_length_list_[current_item_index_])
376 AdvanceItem();
377
378 // Decrement the remaining byte count.
379 remaining_bytes_ -= result;
380 DCHECK_GE(remaining_bytes_, 0ul);
381
382 // Adjust the read buffer.
383 read_buf_->DidConsume(result);
384 DCHECK_GE(read_buf_->BytesRemaining(), 0);
385 }
386
387 void BlobReader::ReadBytesItem(const BlobDataItem& item, int bytes_to_read) {
388 TRACE_EVENT1("Blob", "BlobReader::ReadBytesItem", "uuid", blob_data_->uuid());
389 DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
390
391 memcpy(read_buf_->data(), item.bytes() + item.offset() + current_item_offset_,
392 bytes_to_read);
393
394 AdvanceBytesRead(bytes_to_read);
395 }
396
397 BlobReader::Status BlobReader::ReadFileItem(FileStreamReader* reader,
398 int bytes_to_read) {
399 DCHECK(!io_pending_)
400 << "Can't begin IO while another IO operation is pending.";
401 DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
402 DCHECK(reader);
403 TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
404 blob_data_->uuid());
405 const int result = reader->Read(
406 read_buf_.get(), bytes_to_read,
407 base::Bind(&BlobReader::DidReadFile, weak_factory_.GetWeakPtr()));
408 if (result >= 0) {
409 AdvanceBytesRead(result);
410 return Status::DONE;
411 }
412 if (result == net::ERR_IO_PENDING) {
413 io_pending_ = true;
414 return Status::IO_PENDING;
415 }
416 return ReportError(result);
417 }
418
419 void BlobReader::DidReadFile(int result) {
420 TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
421 blob_data_->uuid());
422 DidReadItem(result);
423 }
424
425 void BlobReader::ContinueAsyncReadLoop() {
426 int bytes_read = 0;
427 Status read_status = ReadLoop(&bytes_read);
428 switch (read_status) {
429 case Status::DONE: {
430 net::CompletionCallback done = read_callback_;
431 read_callback_.Reset();
432 done.Run(bytes_read);
433 return;
434 }
435 case Status::NET_ERROR:
436 InvalidateCallbacksAndDone(net_error_, read_callback_);
437 return;
438 case Status::IO_PENDING:
439 return;
440 }
441 }
442
443 void BlobReader::DeleteCurrentFileReader() {
444 SetFileReaderAtIndex(current_item_index_, scoped_ptr<FileStreamReader>());
445 }
446
447 BlobReader::Status BlobReader::ReadDiskCacheEntryItem(const BlobDataItem& item,
448 int bytes_to_read) {
449 DCHECK(!io_pending_)
450 << "Can't begin IO while another IO operation is pending.";
451 TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadDiskCacheItem", this,
452 "uuid", blob_data_->uuid());
453 DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
454
455 const int result = item.disk_cache_entry()->ReadData(
456 item.disk_cache_stream_index(), current_item_offset_, read_buf_.get(),
457 bytes_to_read, base::Bind(&BlobReader::DidReadDiskCacheEntry,
458 weak_factory_.GetWeakPtr()));
459 if (result >= 0) {
460 AdvanceBytesRead(result);
461 return Status::DONE;
462 }
463 if (result == net::ERR_IO_PENDING) {
464 io_pending_ = true;
465 return Status::IO_PENDING;
466 }
467 return ReportError(result);
468 }
469
470 void BlobReader::DidReadDiskCacheEntry(int result) {
471 TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadDiskCacheItem", this, "uuid",
472 blob_data_->uuid());
473 DidReadItem(result);
474 }
475
476 void BlobReader::DidReadItem(int result) {
477 DCHECK(io_pending_) << "Asynchronous IO completed while IO wasn't pending?";
478 io_pending_ = false;
479 if (result <= 0) {
480 InvalidateCallbacksAndDone(result, read_callback_);
481 return;
482 }
483 AdvanceBytesRead(result);
484 ContinueAsyncReadLoop();
485 }
486
487 int BlobReader::BytesReadCompleted() {
488 int bytes_read = read_buf_->BytesConsumed();
489 read_buf_ = nullptr;
490 return bytes_read;
491 }
492
493 int BlobReader::ComputeBytesToRead() const {
494 uint64_t current_item_length = item_length_list_[current_item_index_];
495
496 uint64_t item_remaining = current_item_length - current_item_offset_;
497 uint64_t buf_remaining = read_buf_->BytesRemaining();
498 uint64_t max_int_value = std::numeric_limits<int>::max();
499 // Clamp to INT_MAX so the cast to int below cannot overflow.
500 uint64_t min = std::min(
501 std::min(std::min(item_remaining, buf_remaining), remaining_bytes_),
502 max_int_value);
503
504 return static_cast<int>(min);
505 }
506
507 FileStreamReader* BlobReader::GetOrCreateFileReaderAtIndex(size_t index) {
508 const auto& items = blob_data_->items();
509 DCHECK_LT(index, items.size());
510 const BlobDataItem& item = *items.at(index);
511 if (!IsFileType(item.type()))
512 return nullptr;
513 auto it = index_to_reader_.find(index);
514 if (it != index_to_reader_.end()) {
515 DCHECK(it->second);
516 return it->second;
517 }
518 scoped_ptr<FileStreamReader> reader = CreateFileStreamReader(item, 0);
519 FileStreamReader* ret_value = reader.get();
520 if (!ret_value)
521 return nullptr;
522 index_to_reader_[index] = reader.release();
523 return ret_value;
524 }
525
526 scoped_ptr<FileStreamReader> BlobReader::CreateFileStreamReader(
527 const BlobDataItem& item,
528 uint64_t additional_offset) {
529 DCHECK(IsFileType(item.type()));
530
531 switch (item.type()) {
532 case DataElement::TYPE_FILE:
533 return file_stream_provider_->CreateForLocalFile(
534 file_task_runner_.get(), item.path(),
535 item.offset() + additional_offset,
536 item.expected_modification_time())
537 .Pass();
538 case DataElement::TYPE_FILE_FILESYSTEM:
539 return file_stream_provider_
540 ->CreateFileStreamReader(
541 item.filesystem_url(), item.offset() + additional_offset,
542 item.length() == std::numeric_limits<uint64_t>::max()
543 ? storage::kMaximumLength
544 : item.length() - additional_offset,
545 item.expected_modification_time())
546 .Pass();
547 case DataElement::TYPE_BLOB:
548 case DataElement::TYPE_BYTES:
549 case DataElement::TYPE_DISK_CACHE_ENTRY:
550 case DataElement::TYPE_UNKNOWN:
551 break;
552 }
553
554 NOTREACHED();
555 return nullptr;
556 }
557
558 void BlobReader::SetFileReaderAtIndex(size_t index,
559 scoped_ptr<FileStreamReader> reader) {
560 auto found = index_to_reader_.find(current_item_index_);
561 if (found != index_to_reader_.end()) {
562 if (found->second) {
563 delete found->second;
564 }
565 if (!reader.get()) {
566 index_to_reader_.erase(found);
567 return;
568 }
569 found->second = reader.release();
570 } else if (reader.get()) {
571 index_to_reader_[current_item_index_] = reader.release();
572 }
573 }
574
575 } // namespace storage