OLD | NEW |
| (Empty) |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "storage/browser/blob/blob_reader.h" | |
6 | |
7 #include <algorithm> | |
8 #include <limits> | |
9 | |
10 #include "base/bind.h" | |
11 #include "base/sequenced_task_runner.h" | |
12 #include "base/stl_util.h" | |
13 #include "base/time/time.h" | |
14 #include "base/trace_event/trace_event.h" | |
15 #include "net/base/io_buffer.h" | |
16 #include "net/base/net_errors.h" | |
17 #include "net/disk_cache/disk_cache.h" | |
18 #include "storage/browser/blob/blob_data_handle.h" | |
19 #include "storage/browser/blob/blob_data_snapshot.h" | |
20 #include "storage/browser/fileapi/file_stream_reader.h" | |
21 #include "storage/browser/fileapi/file_system_context.h" | |
22 #include "storage/browser/fileapi/file_system_url.h" | |
23 #include "storage/common/data_element.h" | |
24 | |
25 namespace storage { | |
26 namespace { | |
27 bool IsFileType(DataElement::Type type) { | |
28 switch (type) { | |
29 case DataElement::TYPE_FILE: | |
30 case DataElement::TYPE_FILE_FILESYSTEM: | |
31 return true; | |
32 default: | |
33 return false; | |
34 } | |
35 } | |
36 } // namespace | |
37 | |
38 BlobReader::FileStreamReaderProvider::~FileStreamReaderProvider() {} | |
39 | |
// Constructs a reader over |blob_handle|'s data. A null |blob_handle| is
// tolerated; subsequent operations then fail with net::ERR_FILE_NOT_FOUND.
// |file_stream_provider| supplies FileStreamReaders for file-backed items,
// and |file_task_runner| is handed to those readers for file IO.
BlobReader::BlobReader(
    const BlobDataHandle* blob_handle,
    scoped_ptr<FileStreamReaderProvider> file_stream_provider,
    base::SequencedTaskRunner* file_task_runner)
    : file_stream_provider_(file_stream_provider.Pass()),
      file_task_runner_(file_task_runner),
      net_error_(net::OK),
      weak_factory_(this) {
  if (blob_handle) {
    // Snapshot the blob's item list so it stays stable while we read.
    blob_data_ = blob_handle->CreateSnapshot().Pass();
  }
}
52 | |
BlobReader::~BlobReader() {
  // |index_to_reader_| holds owning raw FileStreamReader pointers.
  STLDeleteValues(&index_to_reader_);
}
56 | |
// Computes the blob's total byte size. In-memory items report their length
// directly; file-backed items may need an asynchronous GetLength() call.
// Returns DONE if all sizes resolved synchronously; IO_PENDING if at least
// one GetLength() is outstanding, in which case |done| runs on completion;
// NET_ERROR (with net_error() set) on failure.
BlobReader::Status BlobReader::CalculateSize(
    const net::CompletionCallback& done) {
  DCHECK(!total_size_calculated_);
  DCHECK(size_callback_.is_null());
  if (!blob_data_.get()) {
    return ReportError(net::ERR_FILE_NOT_FOUND);
  }

  net_error_ = net::OK;
  total_size_ = 0;
  const auto& items = blob_data_->items();
  item_length_list_.resize(items.size());
  pending_get_file_info_count_ = 0;
  for (size_t i = 0; i < items.size(); ++i) {
    const BlobDataItem& item = *items.at(i);
    if (IsFileType(item.type())) {
      // Assume the lookup is async until GetLength() proves otherwise.
      ++pending_get_file_info_count_;
      storage::FileStreamReader* const reader = GetOrCreateFileReaderAtIndex(i);
      if (!reader) {
        return ReportError(net::ERR_FAILED);
      }
      int64_t length_output = reader->GetLength(base::Bind(
          &BlobReader::DidGetFileItemLength, weak_factory_.GetWeakPtr(), i));
      if (length_output == net::ERR_IO_PENDING) {
        continue;
      }
      if (length_output < 0) {
        return ReportError(length_output);
      }
      // We got the length right away
      --pending_get_file_info_count_;
      uint64_t resolved_length;
      if (!ResolveFileItemLength(item, length_output, &resolved_length)) {
        return ReportError(net::ERR_FILE_NOT_FOUND);
      }
      if (!AddItemLength(i, resolved_length)) {
        return ReportError(net::ERR_FAILED);
      }
      continue;
    }

    // Non-file item: the snapshot already knows its exact length.
    if (!AddItemLength(i, item.length()))
      return ReportError(net::ERR_FAILED);
  }

  if (pending_get_file_info_count_ == 0) {
    DidCountSize();
    return Status::DONE;
  }
  // Note: We only set the callback if we know that we're an async operation.
  size_callback_ = done;
  return Status::IO_PENDING;
}
110 | |
111 BlobReader::Status BlobReader::SetReadRange(uint64_t offset, uint64_t length) { | |
112 if (!blob_data_.get()) { | |
113 return ReportError(net::ERR_FILE_NOT_FOUND); | |
114 } | |
115 if (!total_size_calculated_) { | |
116 return ReportError(net::ERR_FAILED); | |
117 } | |
118 if (offset + length > total_size_) { | |
119 return ReportError(net::ERR_FILE_NOT_FOUND); | |
120 } | |
121 // Skip the initial items that are not in the range. | |
122 remaining_bytes_ = length; | |
123 const auto& items = blob_data_->items(); | |
124 for (current_item_index_ = 0; | |
125 current_item_index_ < items.size() && | |
126 offset >= item_length_list_[current_item_index_]; | |
127 ++current_item_index_) { | |
128 offset -= item_length_list_[current_item_index_]; | |
129 } | |
130 | |
131 // Set the offset that need to jump to for the first item in the range. | |
132 current_item_offset_ = offset; | |
133 if (current_item_offset_ == 0) | |
134 return Status::DONE; | |
135 | |
136 // Adjust the offset of the first stream if it is of file type. | |
137 const BlobDataItem& item = *items.at(current_item_index_); | |
138 if (IsFileType(item.type())) { | |
139 SetFileReaderAtIndex(current_item_index_, | |
140 CreateFileStreamReader(item, offset)); | |
141 } | |
142 return Status::DONE; | |
143 } | |
144 | |
// Reads up to |dest_size| bytes of blob data into |buffer|. On DONE,
// |*bytes_read| holds the number of bytes copied; on IO_PENDING, |done| is
// run later with the result (bytes read or a net error). CalculateSize()
// must have completed first, and only one Read() may be outstanding.
BlobReader::Status BlobReader::Read(net::IOBuffer* buffer,
                                    size_t dest_size,
                                    int* bytes_read,
                                    net::CompletionCallback done) {
  DCHECK(bytes_read);
  DCHECK_GE(remaining_bytes_, 0ul);
  DCHECK(read_callback_.is_null());
  DCHECK(total_size_calculated_);

  *bytes_read = 0;
  if (!blob_data_.get()) {
    return ReportError(net::ERR_FILE_NOT_FOUND);
  }
  if (!total_size_calculated_) {
    return ReportError(net::ERR_FAILED);
  }

  // Bail out immediately if we encountered an error.
  if (net_error_ != net::OK) {
    return Status::NET_ERROR;
  }

  // Clamp the request so we never read past the (range-adjusted) end.
  DCHECK_GE(dest_size, 0ul);
  if (remaining_bytes_ < static_cast<uint64_t>(dest_size))
    dest_size = static_cast<int>(remaining_bytes_);

  // If we should copy zero bytes because |remaining_bytes_| is zero, short
  // circuit here.
  if (!dest_size) {
    *bytes_read = 0;
    return Status::DONE;
  }

  // Keep track of the buffer.
  DCHECK(!read_buf_.get());
  read_buf_ = new net::DrainableIOBuffer(buffer, dest_size);

  Status status = ReadLoop(bytes_read);
  // Only retain |done| when the operation actually went asynchronous.
  if (status == Status::IO_PENDING)
    read_callback_ = done;
  return status;
}
187 | |
// Aborts any in-flight IO. Invalidating the weak pointers drops pending
// FileStreamReader/disk-cache completion callbacks, so nothing fires after
// this returns.
void BlobReader::Kill() {
  DeleteCurrentFileReader();
  weak_factory_.InvalidateWeakPtrs();
}
192 | |
193 bool BlobReader::IsInMemory() const { | |
194 if (!blob_data_.get()) { | |
195 return true; | |
196 } | |
197 for (const auto& item : blob_data_->items()) { | |
198 if (item->type() != DataElement::TYPE_BYTES) { | |
199 return false; | |
200 } | |
201 } | |
202 return true; | |
203 } | |
204 | |
// Fails all outstanding work: records |net_error|, cancels pending IO
// callbacks via weak-pointer invalidation, clears callback/buffer state, and
// finally runs |done| with the error. |done| is taken by value because
// callers pass |size_callback_| or |read_callback_|, which are Reset() below
// before it is run.
void BlobReader::InvalidateCallbacksAndDone(int net_error,
                                            net::CompletionCallback done) {
  net_error_ = net_error;
  weak_factory_.InvalidateWeakPtrs();
  size_callback_.Reset();
  read_callback_.Reset();
  read_buf_ = nullptr;
  done.Run(net_error);
}
214 | |
// Records |net_error| (exposed via net_error()) and returns NET_ERROR for
// the caller to propagate.
BlobReader::Status BlobReader::ReportError(int net_error) {
  net_error_ = net_error;
  return Status::NET_ERROR;
}
219 | |
220 bool BlobReader::AddItemLength(size_t index, uint64_t item_length) { | |
221 if (item_length > std::numeric_limits<uint64_t>::max() - total_size_) { | |
222 return false; | |
223 } | |
224 | |
225 // Cache the size and add it to the total size. | |
226 DCHECK_LT(index, item_length_list_.size()); | |
227 item_length_list_[index] = item_length; | |
228 total_size_ += item_length; | |
229 return true; | |
230 } | |
231 | |
232 bool BlobReader::ResolveFileItemLength(const BlobDataItem& item, | |
233 int64_t total_length, | |
234 uint64_t* output_length) { | |
235 DCHECK(IsFileType(item.type())); | |
236 DCHECK(output_length); | |
237 uint64_t file_length = total_length; | |
238 uint64_t item_offset = item.offset(); | |
239 uint64_t item_length = item.length(); | |
240 if (item_offset > file_length) { | |
241 return false; | |
242 } | |
243 | |
244 uint64 max_length = file_length - item_offset; | |
245 | |
246 // If item length is undefined, then we need to use the file size being | |
247 // resolved in the real time. | |
248 if (item_length == std::numeric_limits<uint64>::max()) { | |
249 item_length = max_length; | |
250 } else if (item_length > max_length) { | |
251 return false; | |
252 } | |
253 | |
254 *output_length = item_length; | |
255 return true; | |
256 } | |
257 | |
// Completion callback for the async FileStreamReader::GetLength() that
// CalculateSize() issued for item |index|; |result| is the file size or a
// net error.
void BlobReader::DidGetFileItemLength(size_t index, int64_t result) {
  // Do nothing if we have encountered an error.
  if (net_error_)
    return;

  // A file that changed underneath us is surfaced as a missing file.
  if (result == net::ERR_UPLOAD_FILE_CHANGED)
    result = net::ERR_FILE_NOT_FOUND;
  if (result < 0) {
    InvalidateCallbacksAndDone(result, size_callback_);
    return;
  }

  const auto& items = blob_data_->items();
  DCHECK_LT(index, items.size());
  const BlobDataItem& item = *items.at(index);
  uint64_t length;
  if (!ResolveFileItemLength(item, result, &length)) {
    InvalidateCallbacksAndDone(net::ERR_FILE_NOT_FOUND, size_callback_);
    return;
  }
  if (!AddItemLength(index, length)) {
    InvalidateCallbacksAndDone(net::ERR_FAILED, size_callback_);
    return;
  }

  // When the last outstanding GetLength() resolves, finish size computation.
  if (--pending_get_file_info_count_ == 0)
    DidCountSize();
}
286 | |
// Finalizes size computation: publishes |total_size_| and, if
// CalculateSize() went asynchronous, notifies its caller.
void BlobReader::DidCountSize() {
  DCHECK(!net_error_);
  total_size_calculated_ = true;
  remaining_bytes_ = total_size_;
  // This is set only if we're async.
  if (!size_callback_.is_null()) {
    // Reset the member before running, in case the callback re-enters us.
    net::CompletionCallback done = size_callback_;
    size_callback_.Reset();
    done.Run(net::OK);
  }
}
298 | |
299 BlobReader::Status BlobReader::ReadLoop(int* bytes_read) { | |
300 // Read until we encounter an error or could not get the data immediately. | |
301 while (remaining_bytes_ > 0 && read_buf_->BytesRemaining() > 0) { | |
302 Status read_status = ReadItem(); | |
303 if (read_status == Status::DONE) { | |
304 continue; | |
305 } | |
306 return read_status; | |
307 } | |
308 | |
309 *bytes_read = BytesReadCompleted(); | |
310 return Status::DONE; | |
311 } | |
312 | |
// Reads (part of) the current item into |read_buf_|, dispatching on the item
// type. Returns DONE when data was consumed synchronously (or the item was
// merely advanced), IO_PENDING when an async read was started, and NET_ERROR
// on failure.
BlobReader::Status BlobReader::ReadItem() {
  // Are we done with reading all the blob data?
  if (remaining_bytes_ == 0)
    return Status::DONE;

  const auto& items = blob_data_->items();
  // If we get to the last item but still expect something to read, bail out
  // since something is wrong.
  if (current_item_index_ >= items.size()) {
    return ReportError(net::ERR_FAILED);
  }

  // Compute the bytes to read for current item.
  int bytes_to_read = ComputeBytesToRead();

  // If nothing to read for current item, advance to next item.
  if (bytes_to_read == 0) {
    AdvanceItem();
    return Status::DONE;
  }

  // Do the reading.
  const BlobDataItem& item = *items.at(current_item_index_);
  if (item.type() == DataElement::TYPE_BYTES) {
    // In-memory items always complete synchronously.
    ReadBytesItem(item, bytes_to_read);
    return Status::DONE;
  }
  if (item.type() == DataElement::TYPE_DISK_CACHE_ENTRY)
    return ReadDiskCacheEntryItem(item, bytes_to_read);
  if (!IsFileType(item.type())) {
    NOTREACHED();
    return ReportError(net::ERR_FAILED);
  }
  storage::FileStreamReader* const reader =
      GetOrCreateFileReaderAtIndex(current_item_index_);
  if (!reader) {
    return ReportError(net::ERR_FAILED);
  }

  return ReadFileItem(reader, bytes_to_read);
}
354 | |
// Moves on to the next item, releasing any file reader cached for the
// current one.
void BlobReader::AdvanceItem() {
  // Close the file if the current item is a file.
  DeleteCurrentFileReader();

  // Advance to the next item.
  current_item_index_++;
  current_item_offset_ = 0;
}
363 | |
// Accounts for |result| bytes having been read into |read_buf_|, advancing
// to the next item once the current one is exhausted.
void BlobReader::AdvanceBytesRead(int result) {
  DCHECK_GT(result, 0);

  // Do we finish reading the current item?
  current_item_offset_ += result;
  if (current_item_offset_ == item_length_list_[current_item_index_])
    AdvanceItem();

  // Subtract the remaining bytes.
  remaining_bytes_ -= result;
  DCHECK_GE(remaining_bytes_, 0ul);

  // Adjust the read buffer.
  read_buf_->DidConsume(result);
  DCHECK_GE(read_buf_->BytesRemaining(), 0);
}
380 | |
// Copies |bytes_to_read| bytes of an in-memory item into |read_buf_|,
// starting at the item's base offset plus our position within the item.
void BlobReader::ReadBytesItem(const BlobDataItem& item, int bytes_to_read) {
  TRACE_EVENT1("Blob", "BlobReader::ReadBytesItem", "uuid", blob_data_->uuid());
  DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);

  memcpy(read_buf_->data(), item.bytes() + item.offset() + current_item_offset_,
         bytes_to_read);

  AdvanceBytesRead(bytes_to_read);
}
390 | |
// Starts reading |bytes_to_read| bytes of a file-backed item via |reader|.
// A synchronous completion is folded in right here; otherwise DidReadFile()
// runs when the read finishes.
BlobReader::Status BlobReader::ReadFileItem(FileStreamReader* reader,
                                            int bytes_to_read) {
  DCHECK(!io_pending_)
      << "Can't begin IO while another IO operation is pending.";
  DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
  DCHECK(reader);
  TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
                           blob_data_->uuid());
  const int result = reader->Read(
      read_buf_.get(), bytes_to_read,
      base::Bind(&BlobReader::DidReadFile, weak_factory_.GetWeakPtr()));
  if (result >= 0) {
    // Completed synchronously; the bound callback will not run.
    AdvanceBytesRead(result);
    return Status::DONE;
  }
  if (result == net::ERR_IO_PENDING) {
    io_pending_ = true;
    return Status::IO_PENDING;
  }
  return ReportError(result);
}
412 | |
// Async completion callback for ReadFileItem().
void BlobReader::DidReadFile(int result) {
  TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
                         blob_data_->uuid());
  DidReadItem(result);
}
418 | |
// Resumes the read loop after an async item read finished, delivering the
// final result (bytes read, or an error) through |read_callback_|.
void BlobReader::ContinueAsyncReadLoop() {
  int bytes_read = 0;
  Status read_status = ReadLoop(&bytes_read);
  switch (read_status) {
    case Status::DONE: {
      // Reset the member before running the callback in case it re-enters
      // Read().
      net::CompletionCallback done = read_callback_;
      read_callback_.Reset();
      done.Run(bytes_read);
      return;
    }
    case Status::NET_ERROR:
      InvalidateCallbacksAndDone(net_error_, read_callback_);
      return;
    case Status::IO_PENDING:
      // Another async item read was started; wait for its completion.
      return;
  }
}
436 | |
// Drops (and deletes) the cached file reader for the current item, if any.
void BlobReader::DeleteCurrentFileReader() {
  SetFileReaderAtIndex(current_item_index_, scoped_ptr<FileStreamReader>());
}
440 | |
// Starts reading |bytes_to_read| bytes of a disk-cache-backed item. A
// synchronous completion is folded in right here; otherwise
// DidReadDiskCacheEntry() runs when the read finishes.
BlobReader::Status BlobReader::ReadDiskCacheEntryItem(const BlobDataItem& item,
                                                      int bytes_to_read) {
  DCHECK(!io_pending_)
      << "Can't begin IO while another IO operation is pending.";
  TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadDiskCacheItem", this,
                           "uuid", blob_data_->uuid());
  DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);

  const int result = item.disk_cache_entry()->ReadData(
      item.disk_cache_stream_index(), current_item_offset_, read_buf_.get(),
      bytes_to_read, base::Bind(&BlobReader::DidReadDiskCacheEntry,
                                weak_factory_.GetWeakPtr()));
  if (result >= 0) {
    // Completed synchronously; the bound callback will not run.
    AdvanceBytesRead(result);
    return Status::DONE;
  }
  if (result == net::ERR_IO_PENDING) {
    io_pending_ = true;
    return Status::IO_PENDING;
  }
  return ReportError(result);
}
463 | |
// Async completion callback for ReadDiskCacheEntryItem().
void BlobReader::DidReadDiskCacheEntry(int result) {
  TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadDiskCacheItem", this, "uuid",
                         blob_data_->uuid());
  DidReadItem(result);
}
469 | |
// Common completion path for async file/disk-cache reads. A zero or
// negative |result| (unexpected EOF or a net error) aborts the whole read.
void BlobReader::DidReadItem(int result) {
  DCHECK(io_pending_) << "Asynchronous IO completed while IO wasn't pending?";
  io_pending_ = false;
  if (result <= 0) {
    InvalidateCallbacksAndDone(result, read_callback_);
    return;
  }
  AdvanceBytesRead(result);
  ContinueAsyncReadLoop();
}
480 | |
481 int BlobReader::BytesReadCompleted() { | |
482 int bytes_read = read_buf_->BytesConsumed(); | |
483 read_buf_ = nullptr; | |
484 return bytes_read; | |
485 } | |
486 | |
487 int BlobReader::ComputeBytesToRead() const { | |
488 uint64_t current_item_length = item_length_list_[current_item_index_]; | |
489 | |
490 uint64_t item_remaining = current_item_length - current_item_offset_; | |
491 uint64_t buf_remaining = read_buf_->BytesRemaining(); | |
492 uint64_t max_int_value = std::numeric_limits<int>::max(); | |
493 // Here we make sure we don't overflow 'max int'. | |
494 uint64_t min = std::min( | |
495 std::min(std::min(item_remaining, buf_remaining), remaining_bytes_), | |
496 max_int_value); | |
497 | |
498 return static_cast<int>(min); | |
499 } | |
500 | |
// Returns the (lazily created, cached) FileStreamReader for file item
// |index|, or null for non-file items or on creation failure. Cached readers
// are owned by |index_to_reader_| as raw pointers.
FileStreamReader* BlobReader::GetOrCreateFileReaderAtIndex(size_t index) {
  const auto& items = blob_data_->items();
  DCHECK_LT(index, items.size());
  const BlobDataItem& item = *items.at(index);
  if (!IsFileType(item.type()))
    return nullptr;
  auto it = index_to_reader_.find(index);
  if (it != index_to_reader_.end()) {
    DCHECK(it->second);
    return it->second;
  }
  scoped_ptr<FileStreamReader> reader = CreateFileStreamReader(item, 0);
  FileStreamReader* ret_value = reader.get();
  if (!ret_value)
    return nullptr;
  // The map takes ownership of the raw pointer (freed in the destructor or
  // by SetFileReaderAtIndex).
  index_to_reader_[index] = reader.release();
  return ret_value;
}
519 | |
// Creates a new FileStreamReader for a file-backed item, positioned
// |additional_offset| bytes past the item's own offset (used when a read
// range starts inside the item).
scoped_ptr<FileStreamReader> BlobReader::CreateFileStreamReader(
    const BlobDataItem& item,
    uint64_t additional_offset) {
  DCHECK(IsFileType(item.type()));

  switch (item.type()) {
    case DataElement::TYPE_FILE:
      return file_stream_provider_->CreateForLocalFile(
                 file_task_runner_.get(), item.path(),
                 item.offset() + additional_offset,
                 item.expected_modification_time())
          .Pass();
    case DataElement::TYPE_FILE_FILESYSTEM:
      // An item length of max-uint64 means "unknown/to end of file".
      return file_stream_provider_
          ->CreateFileStreamReader(
              item.filesystem_url(), item.offset() + additional_offset,
              item.length() == std::numeric_limits<uint64_t>::max()
                  ? storage::kMaximumLength
                  : item.length() - additional_offset,
              item.expected_modification_time())
          .Pass();
    case DataElement::TYPE_BLOB:
    case DataElement::TYPE_BYTES:
    case DataElement::TYPE_DISK_CACHE_ENTRY:
    case DataElement::TYPE_UNKNOWN:
      break;
  }

  // Non-file types are excluded by the DCHECK above.
  NOTREACHED();
  return nullptr;
}
551 | |
552 void BlobReader::SetFileReaderAtIndex(size_t index, | |
553 scoped_ptr<FileStreamReader> reader) { | |
554 auto found = index_to_reader_.find(current_item_index_); | |
555 if (found != index_to_reader_.end()) { | |
556 if (found->second) { | |
557 delete found->second; | |
558 } | |
559 if (!reader.get()) { | |
560 index_to_reader_.erase(found); | |
561 return; | |
562 } | |
563 found->second = reader.release(); | |
564 } else if (reader.get()) { | |
565 index_to_reader_[current_item_index_] = reader.release(); | |
566 } | |
567 } | |
568 | |
569 } // namespace storage | |
OLD | NEW |