OLD | NEW |
(Empty) | |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "storage/browser/blob/blob_reader.h" |
| 6 |
| 7 #include <algorithm> |
| 8 #include <limits> |
| 9 |
| 10 #include "base/bind.h" |
| 11 #include "base/sequenced_task_runner.h" |
| 12 #include "base/stl_util.h" |
| 13 #include "base/time/time.h" |
| 14 #include "base/trace_event/trace_event.h" |
| 15 #include "net/base/io_buffer.h" |
| 16 #include "net/base/net_errors.h" |
| 17 #include "net/disk_cache/disk_cache.h" |
| 18 #include "storage/browser/blob/blob_data_handle.h" |
| 19 #include "storage/browser/blob/blob_data_snapshot.h" |
| 20 #include "storage/browser/fileapi/file_stream_reader.h" |
| 21 #include "storage/browser/fileapi/file_system_context.h" |
| 22 #include "storage/browser/fileapi/file_system_url.h" |
| 23 #include "storage/common/data_element.h" |
| 24 |
| 25 namespace storage { |
| 26 namespace { |
| 27 bool IsFileType(DataElement::Type type) { |
| 28 switch (type) { |
| 29 case DataElement::TYPE_FILE: |
| 30 case DataElement::TYPE_FILE_FILESYSTEM: |
| 31 return true; |
| 32 default: |
| 33 return false; |
| 34 } |
| 35 } |
| 36 } // namespace |
| 37 |
| 38 BlobReader::FileStreamReaderProvider::~FileStreamReaderProvider() {} |
| 39 |
// Constructs a reader over |blob_handle|'s data. A snapshot of the blob's
// item list is captured up front via CreateSnapshot(). |blob_handle| may be
// null, in which case |blob_data_| stays null and operations report
// ERR_FILE_NOT_FOUND. File items are opened through |file_stream_provider|
// on |file_task_runner|.
BlobReader::BlobReader(
    const BlobDataHandle* blob_handle,
    scoped_ptr<FileStreamReaderProvider> file_stream_provider,
    base::SequencedTaskRunner* file_task_runner)
    : file_stream_provider_(file_stream_provider.Pass()),
      file_task_runner_(file_task_runner),
      net_error_(net::OK),
      weak_factory_(this) {
  if (blob_handle) {
    blob_data_ = blob_handle->CreateSnapshot().Pass();
  }
}
| 52 |
| 53 BlobReader::~BlobReader() { |
| 54 STLDeleteValues(&index_to_reader_); |
| 55 } |
| 56 |
// Resolves the total byte size of the blob. In-memory item sizes come from
// the snapshot; file-backed items require a (possibly asynchronous)
// FileStreamReader::GetLength() call. Returns DONE if the size was computed
// synchronously, IO_PENDING if |done| will be run later, or NET_ERROR on
// failure (with the code stored in |net_error_|).
BlobReader::Status BlobReader::CalculateSize(
    const net::CompletionCallback& done) {
  DCHECK(!total_size_calculated_);
  DCHECK(size_callback_.is_null());
  if (!blob_data_.get()) {
    return ReportError(net::ERR_FILE_NOT_FOUND);
  }

  net_error_ = net::OK;
  total_size_ = 0;
  const auto& items = blob_data_->items();
  item_length_list_.resize(items.size());
  pending_get_file_info_count_ = 0;
  for (size_t i = 0; i < items.size(); ++i) {
    const BlobDataItem& item = *items.at(i);
    if (IsFileType(item.type())) {
      // File-backed item: the authoritative size comes from the reader.
      ++pending_get_file_info_count_;
      storage::FileStreamReader* const reader = GetOrCreateFileReaderAtIndex(i);
      if (!reader) {
        return ReportError(net::ERR_FAILED);
      }
      int64_t length_output = reader->GetLength(base::Bind(
          &BlobReader::DidGetFileItemLength, weak_factory_.GetWeakPtr(), i));
      if (length_output == net::ERR_IO_PENDING) {
        // DidGetFileItemLength() will account for this item later.
        continue;
      }
      if (length_output < 0) {
        return ReportError(length_output);
      }
      // We got the length right away, so undo the pending count.
      --pending_get_file_info_count_;
      uint64_t resolved_length;
      if (!ResolveFileItemLength(item, length_output, &resolved_length)) {
        return ReportError(net::ERR_FILE_NOT_FOUND);
      }
      if (!AddItemLength(i, resolved_length)) {
        return ReportError(net::ERR_FAILED);
      }
      continue;
    }

    // Non-file item: the snapshot already knows its exact length.
    if (!AddItemLength(i, item.length()))
      return ReportError(net::ERR_FAILED);
  }

  if (pending_get_file_info_count_ == 0) {
    // Everything resolved synchronously; finish immediately.
    DidCountSize();
    return Status::DONE;
  }
  // Note: We only set the callback if we know that we're an async operation.
  size_callback_ = done;
  return Status::IO_PENDING;
}
| 110 |
| 111 BlobReader::Status BlobReader::SetReadRange(uint64_t offset, uint64_t length) { |
| 112 if (!blob_data_.get()) { |
| 113 return ReportError(net::ERR_FILE_NOT_FOUND); |
| 114 } |
| 115 if (!total_size_calculated_) { |
| 116 return ReportError(net::ERR_FAILED); |
| 117 } |
| 118 if (offset + length > total_size_) { |
| 119 return ReportError(net::ERR_FILE_NOT_FOUND); |
| 120 } |
| 121 // Skip the initial items that are not in the range. |
| 122 remaining_bytes_ = length; |
| 123 const auto& items = blob_data_->items(); |
| 124 for (current_item_index_ = 0; |
| 125 current_item_index_ < items.size() && |
| 126 offset >= item_length_list_[current_item_index_]; |
| 127 ++current_item_index_) { |
| 128 offset -= item_length_list_[current_item_index_]; |
| 129 } |
| 130 |
| 131 // Set the offset that need to jump to for the first item in the range. |
| 132 current_item_offset_ = offset; |
| 133 if (current_item_offset_ == 0) |
| 134 return Status::DONE; |
| 135 |
| 136 // Adjust the offset of the first stream if it is of file type. |
| 137 const BlobDataItem& item = *items.at(current_item_index_); |
| 138 if (IsFileType(item.type())) { |
| 139 SetFileReaderAtIndex(current_item_index_, |
| 140 CreateFileStreamReader(item, offset)); |
| 141 } |
| 142 return Status::DONE; |
| 143 } |
| 144 |
// Reads up to |dest_size| bytes of blob data into |buffer|. Returns DONE
// with |*bytes_read| filled in when the data was available synchronously,
// IO_PENDING when |done| will be invoked with the byte count later, or
// NET_ERROR on failure. CalculateSize() must have completed, and only one
// Read() may be outstanding at a time.
BlobReader::Status BlobReader::Read(net::IOBuffer* buffer,
                                    size_t dest_size,
                                    int* bytes_read,
                                    net::CompletionCallback done) {
  DCHECK(bytes_read);
  DCHECK_GE(remaining_bytes_, 0ul);
  DCHECK(read_callback_.is_null());

  *bytes_read = 0;
  if (!blob_data_.get()) {
    return ReportError(net::ERR_FILE_NOT_FOUND);
  }
  if (!total_size_calculated_) {
    return ReportError(net::ERR_FAILED);
  }

  // Bail out immediately if we encountered an error.
  if (net_error_ != net::OK) {
    return Status::NET_ERROR;
  }

  // Clamp the request to what is left of the (possibly ranged) blob.
  DCHECK_GE(dest_size, 0ul);
  if (remaining_bytes_ < static_cast<uint64_t>(dest_size))
    dest_size = static_cast<int>(remaining_bytes_);

  // If we should copy zero bytes because |remaining_bytes_| is zero, short
  // circuit here.
  if (!dest_size) {
    *bytes_read = 0;
    return Status::DONE;
  }

  // Keep track of the buffer. DrainableIOBuffer tracks consumption across
  // the multi-item read loop.
  DCHECK(!read_buf_.get());
  read_buf_ = new net::DrainableIOBuffer(buffer, dest_size);

  Status status = ReadLoop(bytes_read);
  // Only hold on to |done| when the read actually went asynchronous.
  if (status == Status::IO_PENDING)
    read_callback_ = done;
  return status;
}
| 186 |
// Aborts any in-flight IO: drops the current item's file reader and
// invalidates weak pointers so pending completion callbacks are never run.
void BlobReader::Kill() {
  DeleteCurrentFileReader();
  weak_factory_.InvalidateWeakPtrs();
}
| 191 |
| 192 bool BlobReader::IsInMemory() const { |
| 193 if (!blob_data_.get()) { |
| 194 return true; |
| 195 } |
| 196 for (const auto& item : blob_data_->items()) { |
| 197 if (item->type() != DataElement::TYPE_BYTES) { |
| 198 return false; |
| 199 } |
| 200 } |
| 201 return true; |
| 202 } |
| 203 |
// Enters the terminal error state: records |net_error|, cancels all pending
// callbacks and the read buffer, then runs |done| with the error code.
// |done| is deliberately taken by value: callers frequently pass
// |size_callback_| or |read_callback_| itself, which is Reset() below
// before |done| is run.
void BlobReader::InvalidateCallbacksAndDone(int net_error,
                                            net::CompletionCallback done) {
  net_error_ = net_error;
  weak_factory_.InvalidateWeakPtrs();
  size_callback_.Reset();
  read_callback_.Reset();
  read_buf_ = nullptr;
  done.Run(net_error);
}
| 213 |
// Records |net_error| for later inspection and returns NET_ERROR so callers
// can propagate the status in one expression.
BlobReader::Status BlobReader::ReportError(int net_error) {
  net_error_ = net_error;
  return Status::NET_ERROR;
}
| 218 |
| 219 bool BlobReader::AddItemLength(size_t index, uint64_t item_length) { |
| 220 if (item_length > std::numeric_limits<uint64_t>::max() - total_size_) { |
| 221 return false; |
| 222 } |
| 223 |
| 224 // Cache the size and add it to the total size. |
| 225 DCHECK_LT(index, item_length_list_.size()); |
| 226 item_length_list_[index] = item_length; |
| 227 total_size_ += item_length; |
| 228 return true; |
| 229 } |
| 230 |
| 231 bool BlobReader::ResolveFileItemLength(const BlobDataItem& item, |
| 232 int64_t total_length, |
| 233 uint64_t* output_length) { |
| 234 DCHECK(IsFileType(item.type())); |
| 235 DCHECK(output_length); |
| 236 uint64_t file_length = total_length; |
| 237 uint64_t item_offset = item.offset(); |
| 238 uint64_t item_length = item.length(); |
| 239 if (item_offset > file_length) { |
| 240 return false; |
| 241 } |
| 242 |
| 243 uint64 max_length = file_length - item_offset; |
| 244 |
| 245 // If item length is undefined, then we need to use the file size being |
| 246 // resolved in the real time. |
| 247 if (item_length == std::numeric_limits<uint64>::max()) { |
| 248 item_length = max_length; |
| 249 } else if (item_length > max_length) { |
| 250 return false; |
| 251 } |
| 252 |
| 253 *output_length = item_length; |
| 254 return true; |
| 255 } |
| 256 |
// Completion callback for the FileStreamReader::GetLength() calls issued by
// CalculateSize() for the file item at |index|. |result| is either the file
// length or a net error code.
void BlobReader::DidGetFileItemLength(size_t index, int64_t result) {
  // Do nothing if we have encountered an error.
  if (net_error_)
    return;

  // A file that changed underneath us is surfaced as a missing file.
  if (result == net::ERR_UPLOAD_FILE_CHANGED)
    result = net::ERR_FILE_NOT_FOUND;
  if (result < 0) {
    InvalidateCallbacksAndDone(result, size_callback_);
    return;
  }

  const auto& items = blob_data_->items();
  DCHECK_LT(index, items.size());
  const BlobDataItem& item = *items.at(index);
  uint64_t length;
  if (!ResolveFileItemLength(item, result, &length)) {
    InvalidateCallbacksAndDone(net::ERR_FILE_NOT_FOUND, size_callback_);
    return;
  }
  if (!AddItemLength(index, length)) {
    InvalidateCallbacksAndDone(net::ERR_FAILED, size_callback_);
    return;
  }

  // Once every outstanding GetLength() has reported, finish the size
  // calculation and notify the caller.
  if (--pending_get_file_info_count_ == 0)
    DidCountSize();
}
| 285 |
| 286 void BlobReader::DidCountSize() { |
| 287 DCHECK(!net_error_); |
| 288 total_size_calculated_ = true; |
| 289 remaining_bytes_ = total_size_; |
| 290 // This is set only if we're async. |
| 291 if (!size_callback_.is_null()) { |
| 292 net::CompletionCallback done = size_callback_; |
| 293 size_callback_.Reset(); |
| 294 done.Run(net::OK); |
| 295 } |
| 296 } |
| 297 |
| 298 BlobReader::Status BlobReader::ReadLoop(int* bytes_read) { |
| 299 // Read until we encounter an error or could not get the data immediately. |
| 300 while (remaining_bytes_ > 0 && read_buf_->BytesRemaining() > 0) { |
| 301 Status read_status = ReadItem(); |
| 302 if (read_status == Status::DONE) { |
| 303 continue; |
| 304 } |
| 305 return read_status; |
| 306 } |
| 307 |
| 308 *bytes_read = BytesReadCompleted(); |
| 309 return Status::DONE; |
| 310 } |
| 311 |
// Reads (or starts reading) from the current item into |read_buf_|,
// dispatching on the item's type. Returns DONE if this step completed
// synchronously (the read loop should continue), IO_PENDING if an async
// read was started, or NET_ERROR on failure.
BlobReader::Status BlobReader::ReadItem() {
  // Are we done with reading all the blob data?
  if (remaining_bytes_ == 0)
    return Status::DONE;

  const auto& items = blob_data_->items();
  // If we get to the last item but still expect something to read, bail out
  // since something is wrong.
  if (current_item_index_ >= items.size()) {
    return ReportError(net::ERR_FAILED);
  }

  // Compute the bytes to read for current item.
  int bytes_to_read = ComputeBytesToRead();

  // If nothing to read for current item, advance to next item.
  if (bytes_to_read == 0) {
    AdvanceItem();
    return Status::DONE;
  }

  // Do the reading.
  const BlobDataItem& item = *items.at(current_item_index_);
  if (item.type() == DataElement::TYPE_BYTES) {
    // In-memory items always complete synchronously.
    ReadBytesItem(item, bytes_to_read);
    return Status::DONE;
  }
  if (item.type() == DataElement::TYPE_DISK_CACHE_ENTRY)
    return ReadDiskCacheEntryItem(item, bytes_to_read);
  if (!IsFileType(item.type())) {
    NOTREACHED();
    return ReportError(net::ERR_FAILED);
  }
  storage::FileStreamReader* const reader =
      GetOrCreateFileReaderAtIndex(current_item_index_);
  if (!reader) {
    return ReportError(net::ERR_FAILED);
  }

  return ReadFileItem(reader, bytes_to_read);
}
| 353 |
// Moves to the next item: releases the current item's file reader (if any)
// and resets the intra-item offset.
void BlobReader::AdvanceItem() {
  // Close the file if the current item is a file.
  DeleteCurrentFileReader();

  // Advance to the next item.
  current_item_index_++;
  current_item_offset_ = 0;
}
| 362 |
// Accounts for |result| bytes having landed in |read_buf_|: advances the
// intra-item offset (moving to the next item when this one is exhausted),
// decrements the remaining byte count, and consumes buffer space.
void BlobReader::AdvanceBytesRead(int result) {
  DCHECK_GT(result, 0);

  // Do we finish reading the current item?
  current_item_offset_ += result;
  if (current_item_offset_ == item_length_list_[current_item_index_])
    AdvanceItem();

  // Subtract the remaining bytes.
  remaining_bytes_ -= result;
  DCHECK_GE(remaining_bytes_, 0ul);

  // Adjust the read buffer.
  read_buf_->DidConsume(result);
  DCHECK_GE(read_buf_->BytesRemaining(), 0);
}
| 379 |
// Copies |bytes_to_read| bytes of an in-memory item into |read_buf_|,
// starting at the item's base offset plus the current intra-item offset.
// Always completes synchronously.
void BlobReader::ReadBytesItem(const BlobDataItem& item, int bytes_to_read) {
  TRACE_EVENT1("Blob", "BlobReader::ReadBytesItem", "uuid", blob_data_->uuid());
  DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);

  memcpy(read_buf_->data(), item.bytes() + item.offset() + current_item_offset_,
         bytes_to_read);

  AdvanceBytesRead(bytes_to_read);
}
| 389 |
// Starts reading |bytes_to_read| bytes from |reader| into |read_buf_|.
// A synchronous result is consumed immediately (DONE); otherwise
// DidReadFile() completes the bookkeeping (IO_PENDING).
// NOTE(review): a 0 result (unexpected EOF) would reach AdvanceBytesRead(),
// whose DCHECK_GT expects a positive count — confirm the reader contract
// guarantees a non-zero synchronous result here.
BlobReader::Status BlobReader::ReadFileItem(FileStreamReader* reader,
                                            int bytes_to_read) {
  DCHECK(!io_pending_)
      << "Can't begin IO while another IO operation is pending.";
  DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
  DCHECK(reader);
  TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
                           blob_data_->uuid());
  const int result = reader->Read(
      read_buf_.get(), bytes_to_read,
      base::Bind(&BlobReader::DidReadFile, weak_factory_.GetWeakPtr()));
  if (result >= 0) {
    AdvanceBytesRead(result);
    return Status::DONE;
  }
  if (result == net::ERR_IO_PENDING) {
    io_pending_ = true;
    return Status::IO_PENDING;
  }
  return ReportError(result);
}
| 411 |
// Async completion for ReadFileItem(); closes the trace span and funnels
// into the shared DidReadItem() path.
void BlobReader::DidReadFile(int result) {
  TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
                         blob_data_->uuid());
  DidReadItem(result);
}
| 417 |
// Resumes the read loop after an asynchronous item read finished, and
// delivers the final byte count (or error) through |read_callback_|.
void BlobReader::ContinueAsyncReadLoop() {
  int bytes_read = 0;
  Status read_status = ReadLoop(&bytes_read);
  switch (read_status) {
    case Status::DONE: {
      // Reset the stored callback before running it so a re-entrant Read()
      // observes a clean state.
      net::CompletionCallback done = read_callback_;
      read_callback_.Reset();
      done.Run(bytes_read);
      return;
    }
    case Status::NET_ERROR:
      InvalidateCallbacksAndDone(net_error_, read_callback_);
      return;
    case Status::IO_PENDING:
      // Another item went async; its completion callback continues the loop.
      return;
  }
}
| 435 |
// Drops (and deletes) the stream reader for the current item, if one exists.
void BlobReader::DeleteCurrentFileReader() {
  SetFileReaderAtIndex(current_item_index_, scoped_ptr<FileStreamReader>());
}
| 439 |
// Starts reading |bytes_to_read| bytes of a disk-cache-backed item into
// |read_buf_| at the current intra-item offset. Mirrors ReadFileItem():
// synchronous results are consumed immediately, ERR_IO_PENDING defers to
// DidReadDiskCacheEntry(), anything else is an error.
BlobReader::Status BlobReader::ReadDiskCacheEntryItem(const BlobDataItem& item,
                                                      int bytes_to_read) {
  DCHECK(!io_pending_)
      << "Can't begin IO while another IO operation is pending.";
  TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadDiskCacheItem", this,
                           "uuid", blob_data_->uuid());
  DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);

  const int result = item.disk_cache_entry()->ReadData(
      item.disk_cache_stream_index(), current_item_offset_, read_buf_.get(),
      bytes_to_read, base::Bind(&BlobReader::DidReadDiskCacheEntry,
                                weak_factory_.GetWeakPtr()));
  if (result >= 0) {
    AdvanceBytesRead(result);
    return Status::DONE;
  }
  if (result == net::ERR_IO_PENDING) {
    io_pending_ = true;
    return Status::IO_PENDING;
  }
  return ReportError(result);
}
| 462 |
// Async completion for ReadDiskCacheEntryItem(); closes the trace span and
// funnels into the shared DidReadItem() path.
void BlobReader::DidReadDiskCacheEntry(int result) {
  TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadDiskCacheItem", this, "uuid",
                         blob_data_->uuid());
  DidReadItem(result);
}
| 468 |
// Shared completion path for asynchronous file and disk-cache reads.
// |result| is a positive byte count, 0 on unexpected EOF, or a net error;
// non-positive results abort the whole read via the error path.
void BlobReader::DidReadItem(int result) {
  DCHECK(io_pending_) << "Asynchronous IO completed while IO wasn't pending?";
  io_pending_ = false;
  if (result <= 0) {
    InvalidateCallbacksAndDone(result, read_callback_);
    return;
  }
  AdvanceBytesRead(result);
  ContinueAsyncReadLoop();
}
| 479 |
| 480 int BlobReader::BytesReadCompleted() { |
| 481 int bytes_read = read_buf_->BytesConsumed(); |
| 482 read_buf_ = nullptr; |
| 483 return bytes_read; |
| 484 } |
| 485 |
| 486 int BlobReader::ComputeBytesToRead() const { |
| 487 uint64_t current_item_length = item_length_list_[current_item_index_]; |
| 488 |
| 489 uint64_t item_remaining = current_item_length - current_item_offset_; |
| 490 uint64_t buf_remaining = read_buf_->BytesRemaining(); |
| 491 uint64_t max_int_value = std::numeric_limits<int>::max(); |
| 492 // Here we make sure we don't overflow 'max int'. |
| 493 uint64_t min = std::min( |
| 494 std::min(std::min(item_remaining, buf_remaining), remaining_bytes_), |
| 495 max_int_value); |
| 496 |
| 497 return static_cast<int>(min); |
| 498 } |
| 499 |
// Returns the cached FileStreamReader for the file item at |index|,
// creating and caching one positioned at the item's start if none exists.
// Returns null for non-file items or when creation fails.
FileStreamReader* BlobReader::GetOrCreateFileReaderAtIndex(size_t index) {
  const auto& items = blob_data_->items();
  DCHECK_LT(index, items.size());
  const BlobDataItem& item = *items.at(index);
  if (!IsFileType(item.type()))
    return nullptr;
  auto it = index_to_reader_.find(index);
  if (it != index_to_reader_.end()) {
    DCHECK(it->second);
    return it->second;
  }
  scoped_ptr<FileStreamReader> reader = CreateFileStreamReader(item, 0);
  FileStreamReader* ret_value = reader.get();
  if (!ret_value)
    return nullptr;
  // The map takes raw ownership here; readers are freed in the destructor
  // or replaced via SetFileReaderAtIndex().
  index_to_reader_[index] = reader.release();
  return ret_value;
}
| 518 |
// Builds a new stream reader for the file-backed |item|, positioned
// |additional_offset| bytes past the item's own offset. Returns null for
// unsupported item types (which indicates a caller bug).
scoped_ptr<FileStreamReader> BlobReader::CreateFileStreamReader(
    const BlobDataItem& item,
    uint64_t additional_offset) {
  DCHECK(IsFileType(item.type()));

  switch (item.type()) {
    case DataElement::TYPE_FILE:
      return file_stream_provider_->CreateForLocalFile(
                 file_task_runner_.get(), item.path(),
                 item.offset() + additional_offset,
                 item.expected_modification_time())
          .Pass();
    case DataElement::TYPE_FILE_FILESYSTEM:
      // An unspecified item length (max uint64) maps to kMaximumLength;
      // otherwise read the item's remaining length after the extra offset.
      return file_stream_provider_
          ->CreateFileStreamReader(
              item.filesystem_url(), item.offset() + additional_offset,
              item.length() == std::numeric_limits<uint64_t>::max()
                  ? storage::kMaximumLength
                  : item.length() - additional_offset,
              item.expected_modification_time())
          .Pass();
    case DataElement::TYPE_BLOB:
    case DataElement::TYPE_BYTES:
    case DataElement::TYPE_DISK_CACHE_ENTRY:
    case DataElement::TYPE_UNKNOWN:
      break;
  }

  NOTREACHED();
  return nullptr;
}
| 550 |
| 551 void BlobReader::SetFileReaderAtIndex(size_t index, |
| 552 scoped_ptr<FileStreamReader> reader) { |
| 553 auto found = index_to_reader_.find(current_item_index_); |
| 554 if (found != index_to_reader_.end()) { |
| 555 if (found->second) { |
| 556 delete found->second; |
| 557 } |
| 558 if (!reader.get()) { |
| 559 index_to_reader_.erase(found); |
| 560 return; |
| 561 } |
| 562 found->second = reader.release(); |
| 563 } else if (reader.get()) { |
| 564 index_to_reader_[current_item_index_] = reader.release(); |
| 565 } |
| 566 } |
| 567 |
| 568 } // namespace storage |
OLD | NEW |