Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(90)

Side by Side Diff: net/disk_cache/entry_impl.cc

Issue 6085013: Start reordering the methods in headers in net/. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 9 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/disk_cache/entry_impl.h" 5 #include "net/disk_cache/entry_impl.h"
6 6
7 #include "base/message_loop.h" 7 #include "base/message_loop.h"
8 #include "base/metrics/histogram.h" 8 #include "base/metrics/histogram.h"
9 #include "base/string_util.h" 9 #include "base/string_util.h"
10 #include "base/values.h" 10 #include "base/values.h"
(...skipping 357 matching lines...) Expand 10 before | Expand all | Expand 10 after
368 EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only) 368 EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
369 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)), read_only_(read_only) { 369 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)), read_only_(read_only) {
370 entry_.LazyInit(backend->File(address), address); 370 entry_.LazyInit(backend->File(address), address);
371 doomed_ = false; 371 doomed_ = false;
372 backend_ = backend; 372 backend_ = backend;
373 for (int i = 0; i < kNumStreams; i++) { 373 for (int i = 0; i < kNumStreams; i++) {
374 unreported_size_[i] = 0; 374 unreported_size_[i] = 0;
375 } 375 }
376 } 376 }
377 377
378 // When an entry is deleted from the cache, we clean up all the data associated
379 // with it for two reasons: to simplify the reuse of the block (we know that any
380 // unused block is filled with zeros), and to simplify the handling of write /
381 // read partial information from an entry (don't have to worry about returning
382 // data related to a previous cache entry because the range was not fully
383 // written before).
384 EntryImpl::~EntryImpl() {
385 Log("~EntryImpl in");
386
387 // Save the sparse info to disk. This will generate IO for this entry and
388 // maybe for a child entry, so it is important to do it before deleting this
389 // entry.
390 sparse_.reset();
391
392 // Remove this entry from the list of open entries.
393 backend_->OnEntryDestroyBegin(entry_.address());
394
395 if (doomed_) {
396 DeleteEntryData(true);
397 } else {
// Not doomed: flush any buffered user data and report to the backend any
// size changes that were accumulated in unreported_size_ but not yet
// accounted for.
398 net_log_.AddEvent(net::NetLog::TYPE_DISK_CACHE_CLOSE, NULL);
399 bool ret = true;
400 for (int index = 0; index < kNumStreams; index++) {
401 if (user_buffers_[index].get()) {
402 if (!(ret = Flush(index, 0)))
403 LOG(ERROR) << "Failed to save user data";
404 }
405 if (unreported_size_[index]) {
406 backend_->ModifyStorageSize(
407 entry_.Data()->data_size[index] - unreported_size_[index],
408 entry_.Data()->data_size[index]);
409 }
410 }
411
412 if (!ret) {
413 // There was a failure writing the actual data. Mark the entry as dirty.
414 int current_id = backend_->GetCurrentEntryId();
// When current_id is 1 the usual current_id - 1 would be 0; use -1 instead,
// presumably so the stored value still reads as dirty (0 is the clean value
// used below) — TODO confirm against the dirty-id protocol.
415 node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
416 node_.Store();
417 } else if (node_.HasData() && node_.Data()->dirty) {
// All data was written successfully: clear the dirty marker (0 == clean).
418 node_.Data()->dirty = 0;
419 node_.Store();
420 }
421 }
422
423 Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
424 net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY, NULL);
425 backend_->OnEntryDestroyEnd();
426 }
427
// Posts the doom operation to the backend's background queue; the actual work
// happens in DoomImpl on the cache thread.
428 void EntryImpl::Doom() {
429 backend_->background_queue()->DoomEntryImpl(this);
430 }
431
// Posts the close operation to the background queue (asynchronous release).
432 void EntryImpl::Close() {
433 backend_->background_queue()->CloseEntryImpl(this);
434 }
435
// Returns the entry's key. Short keys are stored inline in the entry block;
// long keys live in a separate backing file and are read (and cached in key_)
// on first access. const_cast is needed because reading through the block and
// caching into key_ are not const operations, but the logical state is.
436 std::string EntryImpl::GetKey() const {
437 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
438 if (entry->Data()->key_len <= kMaxInternalKeyLength)
439 return std::string(entry->Data()->key);
440
441 // We keep a copy of the key so that we can always return it, even if the
442 // backend is disabled.
443 if (!key_.empty())
444 return key_;
445
446 Addr address(entry->Data()->long_key);
447 DCHECK(address.is_initialized());
448 size_t offset = 0;
449 if (address.is_block_file())
450 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
451
452 COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
453 File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
454 kKeyFileIndex);
455
// key_len + 1 accounts for the terminating NUL; on failure the cached key is
// cleared and an empty string is returned.
456 if (!key_file ||
457 !key_file->Read(WriteInto(&key_, entry->Data()->key_len + 1),
458 entry->Data()->key_len + 1, offset))
459 key_.clear();
460 return key_;
461 }
462
// Last-used time, stored as an internal (serialized) value in the rankings node.
463 Time EntryImpl::GetLastUsed() const {
464 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
465 return Time::FromInternalValue(node->Data()->last_used);
466 }
467
// Last-modified time from the rankings node.
468 Time EntryImpl::GetLastModified() const {
469 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
470 return Time::FromInternalValue(node->Data()->last_modified);
471 }
472
// Size of the given data stream; out-of-range indices yield 0 rather than an
// error, matching the disk_cache::Entry contract of this method.
473 int32 EntryImpl::GetDataSize(int index) const {
474 if (index < 0 || index >= kNumStreams)
475 return 0;
476
477 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
478 return entry->Data()->data_size[index];
479 }
480
// Public read entry point. A NULL callback selects the fully synchronous path
// (ReadDataImpl); otherwise cheap argument validation happens here and the
// actual I/O is queued on the backend's background thread, returning
// ERR_IO_PENDING to the caller.
481 int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
482 net::CompletionCallback* callback) {
483 if (!callback)
484 return ReadDataImpl(index, offset, buf, buf_len, callback);
485
486 DCHECK(node_.Data()->dirty || read_only_);
487 if (index < 0 || index >= kNumStreams)
488 return net::ERR_INVALID_ARGUMENT;
489
// Reading at/after the end of the stream (or zero bytes) is a successful
// zero-length read, not an error.
490 int entry_size = entry_.Data()->data_size[index];
491 if (offset >= entry_size || offset < 0 || !buf_len)
492 return 0;
493
494 if (buf_len < 0)
495 return net::ERR_INVALID_ARGUMENT;
496
497 backend_->background_queue()->ReadData(this, index, offset, buf, buf_len,
498 callback);
499 return net::ERR_IO_PENDING;
500 }
501
// Public write entry point; mirrors ReadData but with no entry-size clamp
// (writes may extend the stream) and an extra truncate flag.
502 int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
503 CompletionCallback* callback, bool truncate) {
504 if (!callback)
505 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);
506
507 DCHECK(node_.Data()->dirty || read_only_);
508 if (index < 0 || index >= kNumStreams)
509 return net::ERR_INVALID_ARGUMENT;
510
511 if (offset < 0 || buf_len < 0)
512 return net::ERR_INVALID_ARGUMENT;
513
514 backend_->background_queue()->WriteData(this, index, offset, buf, buf_len,
515 truncate, callback);
516 return net::ERR_IO_PENDING;
517 }
518
// Sparse read: synchronous with NULL callback, otherwise queued.
519 int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
520 net::CompletionCallback* callback) {
521 if (!callback)
522 return ReadSparseDataImpl(offset, buf, buf_len, callback);
523
524 backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len,
525 callback);
526 return net::ERR_IO_PENDING;
527 }
528
// Sparse write: synchronous with NULL callback, otherwise queued.
529 int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
530 net::CompletionCallback* callback) {
531 if (!callback)
532 return WriteSparseDataImpl(offset, buf, buf_len, callback);
533
534 backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len,
535 callback);
536 return net::ERR_IO_PENDING;
537 }
538
// Always asynchronous: queries the available (written) range of sparse data.
539 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
540 CompletionCallback* callback) {
541 backend_->background_queue()->GetAvailableRange(this, offset, len, start,
542 callback);
543 return net::ERR_IO_PENDING;
544 }
545
// True if this entry already has (or could have) sparse data. When no
// SparseControl exists yet, a temporary one is built just to ask the question.
546 bool EntryImpl::CouldBeSparse() const {
547 if (sparse_.get())
548 return true;
549
550 scoped_ptr<SparseControl> sparse;
551 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
552 return sparse->CouldBeSparse();
553 }
554
// Requests cancellation of in-flight sparse I/O via the background queue.
555 void EntryImpl::CancelSparseIO() {
556 backend_->background_queue()->CancelSparseIO(this);
557 }
558
// Returns OK immediately when no sparse machinery is active; otherwise asks
// the background queue to signal readiness through the callback.
559 int EntryImpl::ReadyForSparseIO(net::CompletionCallback* callback) {
560 if (!sparse_.get())
561 return net::OK;
562
563 backend_->background_queue()->ReadyForSparseIO(this, callback);
564 return net::ERR_IO_PENDING;
565 }
566
567 // ------------------------------------------------------------------------
568
569 void EntryImpl::DoomImpl() { 378 void EntryImpl::DoomImpl() {
570 if (doomed_) 379 if (doomed_)
571 return; 380 return;
572 381
573 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); 382 SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
574 backend_->InternalDoomEntry(this); 383 backend_->InternalDoomEntry(this);
575 } 384 }
576 385
577 int EntryImpl::ReadDataImpl(int index, int offset, net::IOBuffer* buf, 386 int EntryImpl::ReadDataImpl(int index, int offset, net::IOBuffer* buf,
578 int buf_len, CompletionCallback* callback) { 387 int buf_len, CompletionCallback* callback) {
(...skipping 28 matching lines...) Expand all
607 truncate); 416 truncate);
608 417
609 if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) { 418 if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
610 net_log_.EndEvent( 419 net_log_.EndEvent(
611 net::NetLog::TYPE_DISK_CACHE_WRITE_DATA, 420 net::NetLog::TYPE_DISK_CACHE_WRITE_DATA,
612 make_scoped_refptr(new FileIOCompleteParameters(result))); 421 make_scoped_refptr(new FileIOCompleteParameters(result)));
613 } 422 }
614 return result; 423 return result;
615 } 424 }
616 425
// Core read implementation. Serves the request from the in-memory user buffer
// when possible; otherwise issues file I/O that may complete synchronously or
// asynchronously (ERR_IO_PENDING). Returns the number of bytes read, 0 for
// reads past EOF / zero-length reads, or a net:: error code.
617 int EntryImpl::InternalReadData(int index, int offset, net::IOBuffer* buf,
618 int buf_len, CompletionCallback* callback) {
619 DCHECK(node_.Data()->dirty || read_only_);
620 DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
621 if (index < 0 || index >= kNumStreams)
622 return net::ERR_INVALID_ARGUMENT;
623
624 int entry_size = entry_.Data()->data_size[index];
625 if (offset >= entry_size || offset < 0 || !buf_len)
626 return 0;
627
628 if (buf_len < 0)
629 return net::ERR_INVALID_ARGUMENT;
630
631 TimeTicks start = TimeTicks::Now();
632
// Clamp the read so it never runs past the end of the stream.
633 if (offset + buf_len > entry_size)
634 buf_len = entry_size - offset;
635
636 UpdateRank(false);
637
638 backend_->OnEvent(Stats::READ_DATA);
639 backend_->OnRead(buf_len);
640
// eof is the stream size only when backing storage exists; it is handed to
// PreRead along with the requested range (PreRead may adjust buf_len).
641 Addr address(entry_.Data()->data_addr[index]);
642 int eof = address.is_initialized() ? entry_size : 0;
643 if (user_buffers_[index].get() &&
644 user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
645 // Complete the operation locally.
646 buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
647 ReportIOTime(kRead, start);
648 return buf_len;
649 }
650
// Re-read the address: the buffered path above may have changed it
// (presumably via flushing) — TODO confirm; the DCHECK documents that a
// backing address must exist by this point.
651 address.set_value(entry_.Data()->data_addr[index]);
652 DCHECK(address.is_initialized());
653 if (!address.is_initialized())
654 return net::ERR_FAILED;
655
656 File* file = GetBackingFile(address, index);
657 if (!file)
658 return net::ERR_FAILED;
659
// Block-file streams share one file; translate the stream offset into the
// absolute offset of this entry's blocks.
660 size_t file_offset = offset;
661 if (address.is_block_file()) {
662 DCHECK_LE(offset + buf_len, kMaxBlockSize);
663 file_offset += address.start_block() * address.BlockSize() +
664 kBlockHeaderSize;
665 }
666
667 SyncCallback* io_callback = NULL;
668 if (callback) {
669 io_callback = new SyncCallback(this, buf, callback,
670 net::NetLog::TYPE_DISK_CACHE_READ_DATA);
671 }
672
673 bool completed;
674 if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
675 if (io_callback)
676 io_callback->Discard();
677 return net::ERR_FAILED;
678 }
679
// The I/O finished synchronously, so the wrapper callback will never fire;
// discard it instead of leaking it.
680 if (io_callback && completed)
681 io_callback->Discard();
682
683 ReportIOTime(kRead, start);
684 return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
685 }
686
// Core write implementation. Validates the requested range against the
// backend's maximum file size, prepares backing storage (growing or
// truncating), then writes either into the in-memory user buffer or straight
// to the backing file. Returns bytes written, 0 for no-op writes, or a net::
// error code; ERR_IO_PENDING when the file write completes asynchronously.
687 int EntryImpl::InternalWriteData(int index, int offset, net::IOBuffer* buf,
688 int buf_len, CompletionCallback* callback,
689 bool truncate) {
690 DCHECK(node_.Data()->dirty || read_only_);
691 DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
692 if (index < 0 || index >= kNumStreams)
693 return net::ERR_INVALID_ARGUMENT;
694
695 if (offset < 0 || buf_len < 0)
696 return net::ERR_INVALID_ARGUMENT;
697
698 int max_file_size = backend_->MaxFileSize();
699
700 // offset or buf_len could be negative numbers.
// (Both were rejected above, so if the sum compares <= max_file_size inside
// this branch it presumably wrapped around; report kint32max in that case.)
701 if (offset > max_file_size || buf_len > max_file_size ||
702 offset + buf_len > max_file_size) {
703 int size = offset + buf_len;
704 if (size <= max_file_size)
705 size = kint32max;
706 backend_->TooMuchStorageRequested(size);
707 return net::ERR_FAILED;
708 }
709
710 TimeTicks start = TimeTicks::Now();
711
712 // Read the size at this point (it may change inside prepare).
713 int entry_size = entry_.Data()->data_size[index];
714 bool extending = entry_size < offset + buf_len;
// Truncation only matters when the stream would actually shrink.
715 truncate = truncate && entry_size > offset + buf_len;
716 Trace("To PrepareTarget 0x%x", entry_.address().value());
717 if (!PrepareTarget(index, offset, buf_len, truncate))
718 return net::ERR_FAILED;
719
720 Trace("From PrepareTarget 0x%x", entry_.address().value());
721 if (extending || truncate)
722 UpdateSize(index, entry_size, offset + buf_len);
723
724 UpdateRank(true);
725
726 backend_->OnEvent(Stats::WRITE_DATA);
727 backend_->OnWrite(buf_len);
728
729 if (user_buffers_[index].get()) {
730 // Complete the operation locally.
731 user_buffers_[index]->Write(offset, buf, buf_len);
732 ReportIOTime(kWrite, start);
733 return buf_len;
734 }
735
// A zero-length write at offset 0: nothing to store. If it was a truncation,
// PrepareTarget is expected to have released the backing storage already.
736 Addr address(entry_.Data()->data_addr[index]);
737 if (offset + buf_len == 0) {
738 if (truncate) {
739 DCHECK(!address.is_initialized());
740 }
741 return 0;
742 }
743
744 File* file = GetBackingFile(address, index);
745 if (!file)
746 return net::ERR_FAILED;
747
748 size_t file_offset = offset;
749 if (address.is_block_file()) {
750 DCHECK_LE(offset + buf_len, kMaxBlockSize);
751 file_offset += address.start_block() * address.BlockSize() +
752 kBlockHeaderSize;
// External files track the stream length directly; adjust it when shrinking
// or when extending with no payload to write.
753 } else if (truncate || (extending && !buf_len)) {
754 if (!file->SetLength(offset + buf_len))
755 return net::ERR_FAILED;
756 }
757
758 if (!buf_len)
759 return 0;
760
761 SyncCallback* io_callback = NULL;
762 if (callback) {
763 io_callback = new SyncCallback(this, buf, callback,
764 net::NetLog::TYPE_DISK_CACHE_WRITE_DATA);
765 }
766
767 bool completed;
768 if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
769 &completed)) {
770 if (io_callback)
771 io_callback->Discard();
772 return net::ERR_FAILED;
773 }
774
// Synchronous completion: the wrapper callback will never run, discard it.
775 if (io_callback && completed)
776 io_callback->Discard();
777
778 ReportIOTime(kWrite, start);
779 return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
780 }
781
782 int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len, 426 int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len,
783 CompletionCallback* callback) { 427 CompletionCallback* callback) {
784 DCHECK(node_.Data()->dirty || read_only_); 428 DCHECK(node_.Data()->dirty || read_only_);
785 int result = InitSparseData(); 429 int result = InitSparseData();
786 if (net::OK != result) 430 if (net::OK != result)
787 return result; 431 return result;
788 432
789 TimeTicks start = TimeTicks::Now(); 433 TimeTicks start = TimeTicks::Now();
790 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, 434 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
791 callback); 435 callback);
(...skipping 28 matching lines...) Expand all
820 return; 464 return;
821 465
822 sparse_->CancelIO(); 466 sparse_->CancelIO();
823 } 467 }
824 468
825 int EntryImpl::ReadyForSparseIOImpl(CompletionCallback* callback) { 469 int EntryImpl::ReadyForSparseIOImpl(CompletionCallback* callback) {
826 DCHECK(sparse_.get()); 470 DCHECK(sparse_.get());
827 return sparse_->ReadyToUse(callback); 471 return sparse_->ReadyToUse(callback);
828 } 472 }
829 473
830 // ------------------------------------------------------------------------
831
832 uint32 EntryImpl::GetHash() { 474 uint32 EntryImpl::GetHash() {
833 return entry_.Data()->hash; 475 return entry_.Data()->hash;
834 } 476 }
835 477
836 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, 478 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
837 uint32 hash) { 479 uint32 hash) {
838 Trace("Create entry In"); 480 Trace("Create entry In");
839 EntryStore* entry_store = entry_.Data(); 481 EntryStore* entry_store = entry_.Data();
840 RankingsNode* node = node_.Data(); 482 RankingsNode* node = node_.Data();
841 memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks()); 483 memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
(...skipping 212 matching lines...) Expand 10 before | Expand all | Expand 10 after
1054 net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY); 696 net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
1055 net_log_.BeginEvent( 697 net_log_.BeginEvent(
1056 net::NetLog::TYPE_DISK_CACHE_ENTRY, 698 net::NetLog::TYPE_DISK_CACHE_ENTRY,
1057 make_scoped_refptr(new EntryCreationParameters(GetKey(), created))); 699 make_scoped_refptr(new EntryCreationParameters(GetKey(), created)));
1058 } 700 }
1059 701
1060 const net::BoundNetLog& EntryImpl::net_log() const { 702 const net::BoundNetLog& EntryImpl::net_log() const {
1061 return net_log_; 703 return net_log_;
1062 } 704 }
1063 705
// Queues the doom operation on the backend's background thread.
706 void EntryImpl::Doom() {
707 backend_->background_queue()->DoomEntryImpl(this);
708 }
709
// Queues the close operation (asynchronous release of this entry).
710 void EntryImpl::Close() {
711 backend_->background_queue()->CloseEntryImpl(this);
712 }
713
// Returns the key: inline for short keys, otherwise read from the key backing
// file and cached in key_ (hence the const_casts on logically-const state).
714 std::string EntryImpl::GetKey() const {
715 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
716 if (entry->Data()->key_len <= kMaxInternalKeyLength)
717 return std::string(entry->Data()->key);
718
719 // We keep a copy of the key so that we can always return it, even if the
720 // backend is disabled.
721 if (!key_.empty())
722 return key_;
723
724 Addr address(entry->Data()->long_key);
725 DCHECK(address.is_initialized());
726 size_t offset = 0;
727 if (address.is_block_file())
728 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
729
730 COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
731 File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
732 kKeyFileIndex);
733
// key_len + 1 includes the NUL terminator; a failed read leaves key_ empty.
734 if (!key_file ||
735 !key_file->Read(WriteInto(&key_, entry->Data()->key_len + 1),
736 entry->Data()->key_len + 1, offset))
737 key_.clear();
738 return key_;
739 }
740
// Deserializes the last-used timestamp stored in the rankings node.
741 Time EntryImpl::GetLastUsed() const {
742 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
743 return Time::FromInternalValue(node->Data()->last_used);
744 }
745
// Deserializes the last-modified timestamp from the rankings node.
746 Time EntryImpl::GetLastModified() const {
747 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
748 return Time::FromInternalValue(node->Data()->last_modified);
749 }
750
// Stream size; invalid indices return 0 instead of an error.
751 int32 EntryImpl::GetDataSize(int index) const {
752 if (index < 0 || index >= kNumStreams)
753 return 0;
754
755 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
756 return entry->Data()->data_size[index];
757 }
758
// Read entry point: NULL callback means synchronous (ReadDataImpl); otherwise
// validate here, queue the I/O, and return ERR_IO_PENDING.
759 int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
760 net::CompletionCallback* callback) {
761 if (!callback)
762 return ReadDataImpl(index, offset, buf, buf_len, callback);
763
764 DCHECK(node_.Data()->dirty || read_only_);
765 if (index < 0 || index >= kNumStreams)
766 return net::ERR_INVALID_ARGUMENT;
767
// Zero-length reads and reads past the end succeed with 0 bytes.
768 int entry_size = entry_.Data()->data_size[index];
769 if (offset >= entry_size || offset < 0 || !buf_len)
770 return 0;
771
772 if (buf_len < 0)
773 return net::ERR_INVALID_ARGUMENT;
774
775 backend_->background_queue()->ReadData(this, index, offset, buf, buf_len,
776 callback);
777 return net::ERR_IO_PENDING;
778 }
779
// Write entry point: like ReadData but with no size clamp (writes can extend
// the stream) plus the truncate flag.
780 int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
781 CompletionCallback* callback, bool truncate) {
782 if (!callback)
783 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);
784
785 DCHECK(node_.Data()->dirty || read_only_);
786 if (index < 0 || index >= kNumStreams)
787 return net::ERR_INVALID_ARGUMENT;
788
789 if (offset < 0 || buf_len < 0)
790 return net::ERR_INVALID_ARGUMENT;
791
792 backend_->background_queue()->WriteData(this, index, offset, buf, buf_len,
793 truncate, callback);
794 return net::ERR_IO_PENDING;
795 }
796
// Sparse read: synchronous when callback is NULL, otherwise queued.
797 int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
798 net::CompletionCallback* callback) {
799 if (!callback)
800 return ReadSparseDataImpl(offset, buf, buf_len, callback);
801
802 backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len,
803 callback);
804 return net::ERR_IO_PENDING;
805 }
806
// Sparse write: synchronous when callback is NULL, otherwise queued.
807 int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
808 net::CompletionCallback* callback) {
809 if (!callback)
810 return WriteSparseDataImpl(offset, buf, buf_len, callback);
811
812 backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len,
813 callback);
814 return net::ERR_IO_PENDING;
815 }
816
// Always queued; reports the readable sparse range through *start/callback.
817 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
818 CompletionCallback* callback) {
819 backend_->background_queue()->GetAvailableRange(this, offset, len, start,
820 callback);
821 return net::ERR_IO_PENDING;
822 }
823
// True if sparse machinery is active, or a throwaway SparseControl says this
// entry could hold sparse data.
824 bool EntryImpl::CouldBeSparse() const {
825 if (sparse_.get())
826 return true;
827
828 scoped_ptr<SparseControl> sparse;
829 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
830 return sparse->CouldBeSparse();
831 }
832
// Asks the background queue to cancel pending sparse I/O.
833 void EntryImpl::CancelSparseIO() {
834 backend_->background_queue()->CancelSparseIO(this);
835 }
836
// Immediate OK when sparse support is inactive; otherwise readiness is
// signaled asynchronously via the callback.
837 int EntryImpl::ReadyForSparseIO(net::CompletionCallback* callback) {
838 if (!sparse_.get())
839 return net::OK;
840
841 backend_->background_queue()->ReadyForSparseIO(this, callback);
842 return net::ERR_IO_PENDING;
843 }
844
845 // When an entry is deleted from the cache, we clean up all the data associated
846 // with it for two reasons: to simplify the reuse of the block (we know that any
847 // unused block is filled with zeros), and to simplify the handling of write /
848 // read partial information from an entry (don't have to worry about returning
849 // data related to a previous cache entry because the range was not fully
850 // written before).
851 EntryImpl::~EntryImpl() {
852 Log("~EntryImpl in");
853
854 // Save the sparse info to disk. This will generate IO for this entry and
855 // maybe for a child entry, so it is important to do it before deleting this
856 // entry.
857 sparse_.reset();
858
859 // Remove this entry from the list of open entries.
860 backend_->OnEntryDestroyBegin(entry_.address());
861
862 if (doomed_) {
863 DeleteEntryData(true);
864 } else {
// Regular close: flush buffered user data and settle any byte counts still
// pending in unreported_size_ with the backend.
865 net_log_.AddEvent(net::NetLog::TYPE_DISK_CACHE_CLOSE, NULL);
866 bool ret = true;
867 for (int index = 0; index < kNumStreams; index++) {
868 if (user_buffers_[index].get()) {
869 if (!(ret = Flush(index, 0)))
870 LOG(ERROR) << "Failed to save user data";
871 }
872 if (unreported_size_[index]) {
873 backend_->ModifyStorageSize(
874 entry_.Data()->data_size[index] - unreported_size_[index],
875 entry_.Data()->data_size[index]);
876 }
877 }
878
879 if (!ret) {
880 // There was a failure writing the actual data. Mark the entry as dirty.
// -1 substitutes for the value 0 that current_id - 1 would yield when
// current_id == 1, since 0 is the clean marker used below.
881 int current_id = backend_->GetCurrentEntryId();
882 node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
883 node_.Store();
884 } else if (node_.HasData() && node_.Data()->dirty) {
// Clean shutdown of this entry: reset the dirty marker to 0.
885 node_.Data()->dirty = 0;
886 node_.Store();
887 }
888 }
889
890 Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
891 net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY, NULL);
892 backend_->OnEntryDestroyEnd();
893 }
894
1064 // ------------------------------------------------------------------------ 895 // ------------------------------------------------------------------------
1065 896
// Core read path: prefer the in-memory user buffer, otherwise read from the
// backing file. Returns bytes read, 0 for past-EOF / empty reads, a net::
// error on failure, or ERR_IO_PENDING for asynchronous file I/O.
897 int EntryImpl::InternalReadData(int index, int offset, net::IOBuffer* buf,
898 int buf_len, CompletionCallback* callback) {
899 DCHECK(node_.Data()->dirty || read_only_);
900 DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
901 if (index < 0 || index >= kNumStreams)
902 return net::ERR_INVALID_ARGUMENT;
903
904 int entry_size = entry_.Data()->data_size[index];
905 if (offset >= entry_size || offset < 0 || !buf_len)
906 return 0;
907
908 if (buf_len < 0)
909 return net::ERR_INVALID_ARGUMENT;
910
911 TimeTicks start = TimeTicks::Now();
912
// Never read beyond the end of the stream.
913 if (offset + buf_len > entry_size)
914 buf_len = entry_size - offset;
915
916 UpdateRank(false);
917
918 backend_->OnEvent(Stats::READ_DATA);
919 backend_->OnRead(buf_len);
920
// eof is nonzero only when a backing address exists; PreRead receives it
// together with the requested range and may shrink buf_len.
921 Addr address(entry_.Data()->data_addr[index]);
922 int eof = address.is_initialized() ? entry_size : 0;
923 if (user_buffers_[index].get() &&
924 user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
925 // Complete the operation locally.
926 buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
927 ReportIOTime(kRead, start);
928 return buf_len;
929 }
930
// Refresh the address, which may have changed during the buffered attempt
// above; by now backing storage must exist (hence the DCHECK).
931 address.set_value(entry_.Data()->data_addr[index]);
932 DCHECK(address.is_initialized());
933 if (!address.is_initialized())
934 return net::ERR_FAILED;
935
936 File* file = GetBackingFile(address, index);
937 if (!file)
938 return net::ERR_FAILED;
939
// Translate the stream offset into an absolute offset within the shared
// block file, past its header.
940 size_t file_offset = offset;
941 if (address.is_block_file()) {
942 DCHECK_LE(offset + buf_len, kMaxBlockSize);
943 file_offset += address.start_block() * address.BlockSize() +
944 kBlockHeaderSize;
945 }
946
947 SyncCallback* io_callback = NULL;
948 if (callback) {
949 io_callback = new SyncCallback(this, buf, callback,
950 net::NetLog::TYPE_DISK_CACHE_READ_DATA);
951 }
952
953 bool completed;
954 if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
955 if (io_callback)
956 io_callback->Discard();
957 return net::ERR_FAILED;
958 }
959
// Synchronous completion means the wrapper will never be invoked: free it.
960 if (io_callback && completed)
961 io_callback->Discard();
962
963 ReportIOTime(kRead, start);
964 return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
965 }
966
// Core write path: enforce the backend's maximum file size, prepare the
// target storage (grow/truncate), then write through the user buffer or the
// backing file. Returns bytes written, 0 for no-op writes, a net:: error, or
// ERR_IO_PENDING for asynchronous file writes.
967 int EntryImpl::InternalWriteData(int index, int offset, net::IOBuffer* buf,
968 int buf_len, CompletionCallback* callback,
969 bool truncate) {
970 DCHECK(node_.Data()->dirty || read_only_);
971 DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
972 if (index < 0 || index >= kNumStreams)
973 return net::ERR_INVALID_ARGUMENT;
974
975 if (offset < 0 || buf_len < 0)
976 return net::ERR_INVALID_ARGUMENT;
977
978 int max_file_size = backend_->MaxFileSize();
979
980 // offset or buf_len could be negative numbers.
// (Negatives were rejected above; a sum that still compares <= max inside
// this branch presumably wrapped around, so kint32max is reported instead.)
981 if (offset > max_file_size || buf_len > max_file_size ||
982 offset + buf_len > max_file_size) {
983 int size = offset + buf_len;
984 if (size <= max_file_size)
985 size = kint32max;
986 backend_->TooMuchStorageRequested(size);
987 return net::ERR_FAILED;
988 }
989
990 TimeTicks start = TimeTicks::Now();
991
992 // Read the size at this point (it may change inside prepare).
993 int entry_size = entry_.Data()->data_size[index];
994 bool extending = entry_size < offset + buf_len;
// Only honor truncate when the stream would actually shrink.
995 truncate = truncate && entry_size > offset + buf_len;
996 Trace("To PrepareTarget 0x%x", entry_.address().value());
997 if (!PrepareTarget(index, offset, buf_len, truncate))
998 return net::ERR_FAILED;
999
1000 Trace("From PrepareTarget 0x%x", entry_.address().value());
1001 if (extending || truncate)
1002 UpdateSize(index, entry_size, offset + buf_len);
1003
1004 UpdateRank(true);
1005
1006 backend_->OnEvent(Stats::WRITE_DATA);
1007 backend_->OnWrite(buf_len);
1008
1009 if (user_buffers_[index].get()) {
1010 // Complete the operation locally.
1011 user_buffers_[index]->Write(offset, buf, buf_len);
1012 ReportIOTime(kWrite, start);
1013 return buf_len;
1014 }
1015
// Zero bytes at offset 0: nothing to write; a truncation to empty should
// already have released the backing storage (DCHECK below).
1016 Addr address(entry_.Data()->data_addr[index]);
1017 if (offset + buf_len == 0) {
1018 if (truncate) {
1019 DCHECK(!address.is_initialized());
1020 }
1021 return 0;
1022 }
1023
1024 File* file = GetBackingFile(address, index);
1025 if (!file)
1026 return net::ERR_FAILED;
1027
1028 size_t file_offset = offset;
1029 if (address.is_block_file()) {
1030 DCHECK_LE(offset + buf_len, kMaxBlockSize);
1031 file_offset += address.start_block() * address.BlockSize() +
1032 kBlockHeaderSize;
// For external files, adjust the physical length when shrinking, or when
// extending with nothing to write.
1033 } else if (truncate || (extending && !buf_len)) {
1034 if (!file->SetLength(offset + buf_len))
1035 return net::ERR_FAILED;
1036 }
1037
1038 if (!buf_len)
1039 return 0;
1040
1041 SyncCallback* io_callback = NULL;
1042 if (callback) {
1043 io_callback = new SyncCallback(this, buf, callback,
1044 net::NetLog::TYPE_DISK_CACHE_WRITE_DATA);
1045 }
1046
1047 bool completed;
1048 if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
1049 &completed)) {
1050 if (io_callback)
1051 io_callback->Discard();
1052 return net::ERR_FAILED;
1053 }
1054
// Synchronous completion: discard the wrapper callback, it will never run.
1055 if (io_callback && completed)
1056 io_callback->Discard();
1057
1058 ReportIOTime(kWrite, start);
1059 return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
1060 }
1061
1062 // ------------------------------------------------------------------------
1063
1066 bool EntryImpl::CreateDataBlock(int index, int size) { 1064 bool EntryImpl::CreateDataBlock(int index, int size) {
1067 DCHECK(index >= 0 && index < kNumStreams); 1065 DCHECK(index >= 0 && index < kNumStreams);
1068 1066
1069 Addr address(entry_.Data()->data_addr[index]); 1067 Addr address(entry_.Data()->data_addr[index]);
1070 if (!CreateBlock(size, &address)) 1068 if (!CreateBlock(size, &address))
1071 return false; 1069 return false;
1072 1070
1073 entry_.Data()->data_addr[index] = address.value(); 1071 entry_.Data()->data_addr[index] = address.value();
1074 entry_.Store(); 1072 entry_.Store();
1075 return true; 1073 return true;
(...skipping 361 matching lines...) Expand 10 before | Expand all | Expand 10 after
1437 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), 1435 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
1438 entry_.address().value(), node_.address().value()); 1436 entry_.address().value(), node_.address().value());
1439 1437
1440 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], 1438 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
1441 entry_.Data()->data_addr[1], entry_.Data()->long_key); 1439 entry_.Data()->data_addr[1], entry_.Data()->long_key);
1442 1440
1443 Trace(" doomed: %d 0x%x", doomed_, dirty); 1441 Trace(" doomed: %d 0x%x", doomed_, dirty);
1444 } 1442 }
1445 1443
1446 } // namespace disk_cache 1444 } // namespace disk_cache
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698