Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/simple/simple_entry_impl.h" | 5 #include "net/disk_cache/simple/simple_entry_impl.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <cstring> | 8 #include <cstring> |
| 9 #include <vector> | 9 #include <vector> |
| 10 | 10 |
| (...skipping 156 matching lines...) | |
| 167 path_(path), | 167 path_(path), |
| 168 entry_hash_(entry_hash), | 168 entry_hash_(entry_hash), |
| 169 use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS), | 169 use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS), |
| 170 last_used_(Time::Now()), | 170 last_used_(Time::Now()), |
| 171 last_modified_(last_used_), | 171 last_modified_(last_used_), |
| 172 open_count_(0), | 172 open_count_(0), |
| 173 doomed_(false), | 173 doomed_(false), |
| 174 state_(STATE_UNINITIALIZED), | 174 state_(STATE_UNINITIALIZED), |
| 175 synchronous_entry_(NULL), | 175 synchronous_entry_(NULL), |
| 176 net_log_(net::BoundNetLog::Make( | 176 net_log_(net::BoundNetLog::Make( |
| 177 net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)) { | 177 net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)), |
| 178 stream_0_data_(new net::GrowableIOBuffer()) { | |
| 178 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_), | 179 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_), |
| 179 arrays_should_be_same_size); | 180 arrays_should_be_same_size); |
| 180 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_), | 181 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_), |
| 181 arrays_should_be_same_size); | 182 arrays_should_be_same_size); |
| 182 COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_), | 183 COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_), |
| 183 arrays_should_be_same_size); | 184 arrays_should_be_same_size); |
| 184 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_), | 185 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_), |
| 185 arrays_should_be_same_size); | 186 arrays_should_be_same_size); |
| 186 MakeUninitialized(); | 187 MakeUninitialized(); |
| 187 net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY, | 188 net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY, |
| (...skipping 142 matching lines...) | |
| 330 int buf_len, | 331 int buf_len, |
| 331 const CompletionCallback& callback) { | 332 const CompletionCallback& callback) { |
| 332 DCHECK(io_thread_checker_.CalledOnValidThread()); | 333 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 333 | 334 |
| 334 if (net_log_.IsLoggingAllEvents()) { | 335 if (net_log_.IsLoggingAllEvents()) { |
| 335 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL, | 336 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL, |
| 336 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, | 337 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, |
| 337 false)); | 338 false)); |
| 338 } | 339 } |
| 339 | 340 |
| 340 if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || | 341 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount || |
| 341 buf_len < 0) { | 342 buf_len < 0) { |
| 342 if (net_log_.IsLoggingAllEvents()) { | 343 if (net_log_.IsLoggingAllEvents()) { |
| 343 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, | 344 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, |
| 344 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); | 345 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); |
| 345 } | 346 } |
| 346 | 347 |
| 347 RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT); | 348 RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT); |
| 348 return net::ERR_INVALID_ARGUMENT; | 349 return net::ERR_INVALID_ARGUMENT; |
| 349 } | 350 } |
| 350 if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) || | 351 if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) || |
| (...skipping 25 matching lines...) | |
| 376 bool truncate) { | 377 bool truncate) { |
| 377 DCHECK(io_thread_checker_.CalledOnValidThread()); | 378 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 378 | 379 |
| 379 if (net_log_.IsLoggingAllEvents()) { | 380 if (net_log_.IsLoggingAllEvents()) { |
| 380 net_log_.AddEvent( | 381 net_log_.AddEvent( |
| 381 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL, | 382 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL, |
| 382 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, | 383 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, |
| 383 truncate)); | 384 truncate)); |
| 384 } | 385 } |
| 385 | 386 |
| 386 if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 || | 387 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount || |
| 387 buf_len < 0) { | 388 offset < 0 || buf_len < 0) { |
| 388 if (net_log_.IsLoggingAllEvents()) { | 389 if (net_log_.IsLoggingAllEvents()) { |
| 389 net_log_.AddEvent( | 390 net_log_.AddEvent( |
| 390 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, | 391 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, |
| 391 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); | 392 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); |
| 392 } | 393 } |
| 393 RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT); | 394 RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT); |
| 394 return net::ERR_INVALID_ARGUMENT; | 395 return net::ERR_INVALID_ARGUMENT; |
| 395 } | 396 } |
| 396 if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) { | 397 if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) { |
| 397 if (net_log_.IsLoggingAllEvents()) { | 398 if (net_log_.IsLoggingAllEvents()) { |
| 398 net_log_.AddEvent( | 399 net_log_.AddEvent( |
| 399 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, | 400 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, |
| 400 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED)); | 401 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED)); |
| 401 } | 402 } |
| 402 RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE); | 403 RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE); |
| 403 return net::ERR_FAILED; | 404 return net::ERR_FAILED; |
| 404 } | 405 } |
| 405 ScopedOperationRunner operation_runner(this); | 406 ScopedOperationRunner operation_runner(this); |
| 406 | 407 |
| 407 // Currently, Simple Cache is only used for HTTP, which stores the headers in | |
| 408 // stream 0 and always writes them with a single, truncating write. Detect | |
| 409 // these writes and record the size and size changes of the headers. Also, | |
| 410 // note writes to stream 0 that violate those assumptions. | |
| 411 if (stream_index == 0) { | |
| 412 if (offset == 0 && truncate) | |
| 413 RecordHeaderSizeChange(cache_type_, data_size_[0], buf_len); | |
| 414 else | |
| 415 RecordUnexpectedStream0Write(cache_type_); | |
| 416 } | |
| 417 | |
| 418 // We can only do optimistic Write if there is no pending operations, so | 408 // We can only do optimistic Write if there is no pending operations, so |
| 419 // that we are sure that the next call to RunNextOperationIfNeeded will | 409 // that we are sure that the next call to RunNextOperationIfNeeded will |
| 420 // actually run the write operation that sets the stream size. It also | 410 // actually run the write operation that sets the stream size. It also |
| 421 // prevents from previous possibly-conflicting writes that could be stacked | 411 // prevents from previous possibly-conflicting writes that could be stacked |
| 422 // in the |pending_operations_|. We could optimize this for when we have | 412 // in the |pending_operations_|. We could optimize this for when we have |
| 423 // only read operations enqueued. | 413 // only read operations enqueued. |
| 424 const bool optimistic = | 414 const bool optimistic = |
| 425 (use_optimistic_operations_ && state_ == STATE_READY && | 415 (use_optimistic_operations_ && state_ == STATE_READY && |
| 426 pending_operations_.size() == 0); | 416 pending_operations_.size() == 0); |
| 427 CompletionCallback op_callback; | 417 CompletionCallback op_callback; |
| (...skipping 259 matching lines...) | |
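
Aside (editor's illustration): the `optimistic` flag computed just above lets WriteData report success to its caller before the disk write actually runs, which is only safe when the entry is READY and nothing else is queued. Below is a toy sketch of that pattern with invented names (`ToyEntry`); it is not the Chromium implementation.

```cpp
// Toy sketch of the "optimistic write" decision: when the entry is ready and
// nothing is queued, the caller gets its result immediately and the actual
// work runs later from the operation queue. Invented names; not Chromium code.
#include <deque>
#include <functional>
#include <iostream>

class ToyEntry {
 public:
  // Queues a write of |len| bytes. When the write can be optimistic, the byte
  // count is returned right away instead of waiting for the queued work.
  int Write(int len) {
    const bool optimistic = ready_ && pending_.empty();
    pending_.push_back(
        [len] { std::cout << "disk write of " << len << " bytes\n"; });
    return optimistic ? len : -1;  // -1 stands in for net::ERR_IO_PENDING.
  }

  // Stands in for ScopedOperationRunner / RunNextOperationIfNeeded: drains the
  // queued operations in order.
  void RunPendingOperations() {
    while (!pending_.empty()) {
      pending_.front()();
      pending_.pop_front();
    }
  }

 private:
  bool ready_ = true;  // Corresponds to state_ == STATE_READY.
  std::deque<std::function<void()>> pending_;
};

int main() {
  ToyEntry entry;
  const int result = entry.Write(1024);  // Returns 1024 immediately.
  entry.RunPendingOperations();          // The queued write happens afterwards.
  std::cout << "caller saw " << result << "\n";
  return 0;
}
```

In the real code the queued work is posted to a worker pool and a ScopedOperationRunner drains the queue; the sketch collapses that into an explicit RunPendingOperations() call.
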
| 687 DCHECK_EQ(STATE_UNINITIALIZED, state_); | 677 DCHECK_EQ(STATE_UNINITIALIZED, state_); |
| 688 DCHECK(!synchronous_entry_); | 678 DCHECK(!synchronous_entry_); |
| 689 | 679 |
| 690 state_ = STATE_IO_PENDING; | 680 state_ = STATE_IO_PENDING; |
| 691 | 681 |
| 692 // Since we don't know the correct values for |last_used_| and | 682 // Since we don't know the correct values for |last_used_| and |
| 693 // |last_modified_| yet, we make this approximation. | 683 // |last_modified_| yet, we make this approximation. |
| 694 last_used_ = last_modified_ = base::Time::Now(); | 684 last_used_ = last_modified_ = base::Time::Now(); |
| 695 | 685 |
| 696 // If creation succeeds, we should mark all streams to be saved on close. | 686 // If creation succeeds, we should mark all streams to be saved on close. |
| 697 for (int i = 0; i < kSimpleEntryFileCount; ++i) | 687 for (int i = 0; i < kSimpleEntryStreamCount; ++i) |
| 698 have_written_[i] = true; | 688 have_written_[i] = true; |
| 699 | 689 |
| 700 const base::TimeTicks start_time = base::TimeTicks::Now(); | 690 const base::TimeTicks start_time = base::TimeTicks::Now(); |
| 701 scoped_ptr<SimpleEntryCreationResults> results( | 691 scoped_ptr<SimpleEntryCreationResults> results( |
| 702 new SimpleEntryCreationResults( | 692 new SimpleEntryCreationResults( |
| 703 SimpleEntryStat(last_used_, last_modified_, data_size_))); | 693 SimpleEntryStat(last_used_, last_modified_, data_size_))); |
| 704 Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, | 694 Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, |
| 705 cache_type_, | 695 cache_type_, |
| 706 path_, | 696 path_, |
| 707 key_, | 697 key_, |
| (...skipping 14 matching lines...) | |
| 722 DCHECK(io_thread_checker_.CalledOnValidThread()); | 712 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 723 typedef SimpleSynchronousEntry::CRCRecord CRCRecord; | 713 typedef SimpleSynchronousEntry::CRCRecord CRCRecord; |
| 724 scoped_ptr<std::vector<CRCRecord> > | 714 scoped_ptr<std::vector<CRCRecord> > |
| 725 crc32s_to_write(new std::vector<CRCRecord>()); | 715 crc32s_to_write(new std::vector<CRCRecord>()); |
| 726 | 716 |
| 727 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN); | 717 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN); |
| 728 | 718 |
| 729 if (state_ == STATE_READY) { | 719 if (state_ == STATE_READY) { |
| 730 DCHECK(synchronous_entry_); | 720 DCHECK(synchronous_entry_); |
| 731 state_ = STATE_IO_PENDING; | 721 state_ = STATE_IO_PENDING; |
| 732 for (int i = 0; i < kSimpleEntryFileCount; ++i) { | 722 for (int i = 0; i < kSimpleEntryStreamCount; ++i) { |
| 733 if (have_written_[i]) { | 723 if (have_written_[i]) { |
| 734 if (GetDataSize(i) == crc32s_end_offset_[i]) { | 724 if (GetDataSize(i) == crc32s_end_offset_[i]) { |
| 735 int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i]; | 725 int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i]; |
| 736 crc32s_to_write->push_back(CRCRecord(i, true, crc)); | 726 crc32s_to_write->push_back(CRCRecord(i, true, crc)); |
| 737 } else { | 727 } else { |
| 738 crc32s_to_write->push_back(CRCRecord(i, false, 0)); | 728 crc32s_to_write->push_back(CRCRecord(i, false, 0)); |
| 739 } | 729 } |
| 740 } | 730 } |
| 741 } | 731 } |
| 742 } else { | 732 } else { |
| 743 DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_); | 733 DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_); |
| 744 } | 734 } |
| 745 | 735 |
| 746 if (synchronous_entry_) { | 736 if (synchronous_entry_) { |
| 747 Closure task = | 737 Closure task = |
| 748 base::Bind(&SimpleSynchronousEntry::Close, | 738 base::Bind(&SimpleSynchronousEntry::Close, |
| 749 base::Unretained(synchronous_entry_), | 739 base::Unretained(synchronous_entry_), |
| 750 SimpleEntryStat(last_used_, last_modified_, data_size_), | 740 SimpleEntryStat(last_used_, last_modified_, data_size_), |
| 751 base::Passed(&crc32s_to_write)); | 741 base::Passed(&crc32s_to_write), |
| 742 stream_0_data_); | |
| 752 Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this); | 743 Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this); |
| 753 synchronous_entry_ = NULL; | 744 synchronous_entry_ = NULL; |
| 754 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); | 745 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); |
| 755 | 746 |
| 756 for (int i = 0; i < kSimpleEntryFileCount; ++i) { | 747 for (int i = 0; i < kSimpleEntryStreamCount; ++i) { |
| 757 if (!have_written_[i]) { | 748 if (!have_written_[i]) { |
| 758 SIMPLE_CACHE_UMA(ENUMERATION, | 749 SIMPLE_CACHE_UMA(ENUMERATION, |
| 759 "CheckCRCResult", cache_type_, | 750 "CheckCRCResult", cache_type_, |
| 760 crc_check_state_[i], CRC_CHECK_MAX); | 751 crc_check_state_[i], CRC_CHECK_MAX); |
| 761 } | 752 } |
| 762 } | 753 } |
| 763 } else { | 754 } else { |
| 764 CloseOperationComplete(); | 755 CloseOperationComplete(); |
| 765 } | 756 } |
| 766 } | 757 } |
| (...skipping 34 matching lines...) | |
| 801 RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN); | 792 RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN); |
| 802 // If there is nothing to read, we bail out before setting state_ to | 793 // If there is nothing to read, we bail out before setting state_ to |
| 803 // STATE_IO_PENDING. | 794 // STATE_IO_PENDING. |
| 804 if (!callback.is_null()) | 795 if (!callback.is_null()) |
| 805 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0)); | 796 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0)); |
| 806 return; | 797 return; |
| 807 } | 798 } |
| 808 | 799 |
| 809 buf_len = std::min(buf_len, GetDataSize(stream_index) - offset); | 800 buf_len = std::min(buf_len, GetDataSize(stream_index) - offset); |
| 810 | 801 |
| 802 // Since stream 0 data is kept in memory, it is read immediately. | |
| 803 if (stream_index == 0) { | |
| 804 int ret_value = ReadStream0Data(buf, offset, buf_len); | |
| 805 if (!callback.is_null()) { | |
| 806 MessageLoopProxy::current()->PostTask(FROM_HERE, | |
| 807 base::Bind(callback, ret_value)); | |
| 808 } | |
| 809 return; | |
| 810 } | |
| 811 | |
| 811 state_ = STATE_IO_PENDING; | 812 state_ = STATE_IO_PENDING; |
| 812 if (!doomed_ && backend_.get()) | 813 if (!doomed_ && backend_.get()) |
| 813 backend_->index()->UseIfExists(entry_hash_); | 814 backend_->index()->UseIfExists(entry_hash_); |
| 814 | 815 |
| 815 scoped_ptr<uint32> read_crc32(new uint32()); | 816 scoped_ptr<uint32> read_crc32(new uint32()); |
| 816 scoped_ptr<int> result(new int()); | 817 scoped_ptr<int> result(new int()); |
| 817 scoped_ptr<base::Time> last_used(new base::Time()); | 818 scoped_ptr<SimpleEntryStat> entry_stat( |
| 819 new SimpleEntryStat(last_used_, last_modified_, data_size_)); | |
| 818 Closure task = base::Bind( | 820 Closure task = base::Bind( |
| 819 &SimpleSynchronousEntry::ReadData, | 821 &SimpleSynchronousEntry::ReadData, |
| 820 base::Unretained(synchronous_entry_), | 822 base::Unretained(synchronous_entry_), |
| 821 SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len), | 823 SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len), |
| 822 make_scoped_refptr(buf), | 824 make_scoped_refptr(buf), |
| 823 read_crc32.get(), | 825 read_crc32.get(), |
| 824 last_used.get(), | 826 entry_stat.get(), |
| 825 result.get()); | 827 result.get()); |
| 826 Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, | 828 Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, |
| 827 this, | 829 this, |
| 828 stream_index, | 830 stream_index, |
| 829 offset, | 831 offset, |
| 830 callback, | 832 callback, |
| 831 base::Passed(&read_crc32), | 833 base::Passed(&read_crc32), |
| 832 base::Passed(&last_used), | 834 base::Passed(&entry_stat), |
| 833 base::Passed(&result)); | 835 base::Passed(&result)); |
| 834 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); | 836 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); |
| 835 } | 837 } |
| 836 | 838 |
| 837 void SimpleEntryImpl::WriteDataInternal(int stream_index, | 839 void SimpleEntryImpl::WriteDataInternal(int stream_index, |
| 838 int offset, | 840 int offset, |
| 839 net::IOBuffer* buf, | 841 net::IOBuffer* buf, |
| 840 int buf_len, | 842 int buf_len, |
| 841 const CompletionCallback& callback, | 843 const CompletionCallback& callback, |
| 842 bool truncate) { | 844 bool truncate) { |
| (...skipping 16 matching lines...) | |
| 859 } | 861 } |
| 860 if (!callback.is_null()) { | 862 if (!callback.is_null()) { |
| 861 MessageLoopProxy::current()->PostTask( | 863 MessageLoopProxy::current()->PostTask( |
| 862 FROM_HERE, base::Bind(callback, net::ERR_FAILED)); | 864 FROM_HERE, base::Bind(callback, net::ERR_FAILED)); |
| 863 } | 865 } |
| 864 // |this| may be destroyed after return here. | 866 // |this| may be destroyed after return here. |
| 865 return; | 867 return; |
| 866 } | 868 } |
| 867 | 869 |
| 868 DCHECK_EQ(STATE_READY, state_); | 870 DCHECK_EQ(STATE_READY, state_); |
| 871 | |
| 872 // Since stream 0 data is kept in memory, it will be written immediately. | |
| 873 if (stream_index == 0) { | |
| 874 int ret_value = SetStream0Data(buf, offset, buf_len, truncate); | |
| 875 if (!callback.is_null()) { | |
| 876 MessageLoopProxy::current()->PostTask(FROM_HERE, | |
| 877 base::Bind(callback, ret_value)); | |
| 878 } | |
| 879 return; | |
| 880 } | |
| 881 | |
| 869 state_ = STATE_IO_PENDING; | 882 state_ = STATE_IO_PENDING; |
| 870 if (!doomed_ && backend_.get()) | 883 if (!doomed_ && backend_.get()) |
| 871 backend_->index()->UseIfExists(entry_hash_); | 884 backend_->index()->UseIfExists(entry_hash_); |
| 872 // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|) | 885 |
| 873 // if |offset == 0| or we have already computed the CRC for [0 .. offset). | 886 AdvanceCrc(buf, offset, buf_len, stream_index); |
| 874 // We rely on most write operations being sequential, start to end to compute | |
| 875 // the crc of the data. When we write to an entry and close without having | |
| 876 // done a sequential write, we don't check the CRC on read. | |
| 877 if (offset == 0 || crc32s_end_offset_[stream_index] == offset) { | |
| 878 uint32 initial_crc = (offset != 0) ? crc32s_[stream_index] | |
| 879 : crc32(0, Z_NULL, 0); | |
| 880 if (buf_len > 0) { | |
| 881 crc32s_[stream_index] = crc32(initial_crc, | |
| 882 reinterpret_cast<const Bytef*>(buf->data()), | |
| 883 buf_len); | |
| 884 } | |
| 885 crc32s_end_offset_[stream_index] = offset + buf_len; | |
| 886 } | |
| 887 | 887 |
| 888 // |entry_stat| needs to be initialized before modifying |data_size_|. | 888 // |entry_stat| needs to be initialized before modifying |data_size_|. |
| 889 scoped_ptr<SimpleEntryStat> entry_stat( | 889 scoped_ptr<SimpleEntryStat> entry_stat( |
| 890 new SimpleEntryStat(last_used_, last_modified_, data_size_)); | 890 new SimpleEntryStat(last_used_, last_modified_, data_size_)); |
| 891 if (truncate) { | 891 if (truncate) { |
| 892 data_size_[stream_index] = offset + buf_len; | 892 data_size_[stream_index] = offset + buf_len; |
| 893 } else { | 893 } else { |
| 894 data_size_[stream_index] = std::max(offset + buf_len, | 894 data_size_[stream_index] = std::max(offset + buf_len, |
| 895 GetDataSize(stream_index)); | 895 GetDataSize(stream_index)); |
| 896 } | 896 } |
| 897 | 897 |
| 898 // Since we don't know the correct values for |last_used_| and | 898 // Since we don't know the correct values for |last_used_| and |
| 899 // |last_modified_| yet, we make this approximation. | 899 // |last_modified_| yet, we make this approximation. |
| 900 last_used_ = last_modified_ = base::Time::Now(); | 900 last_used_ = last_modified_ = base::Time::Now(); |
| 901 | 901 |
| 902 have_written_[stream_index] = true; | 902 have_written_[stream_index] = true; |
| 903 // Writing on stream 1 affects the placement of stream 0 in the file. | |

pasko 2013/09/18 16:53:56: please add: ", the EOF record will need to be writ
clamy 2013/09/18 17:20:46: Done.
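
Aside (editor's illustration): the CRC bookkeeping that this CL factors out of WriteDataInternal into the new AdvanceCrc helper (near the end of the diff) relies on zlib's crc32() being extendable chunk by chunk, so a running checksum over sequential writes matches a one-shot checksum of the whole stream. A minimal standalone check of that property, not part of the CL:

```cpp
// Standalone check that zlib's crc32() can be advanced chunk by chunk, which
// is what crc32s_[] / crc32s_end_offset_[] rely on for sequential writes.
// Sketch only; not part of this CL.
#include <cassert>
#include <cstring>
#include <zlib.h>

int main() {
  const char part1[] = "HTTP/1.1 200 OK\r\n";
  const char part2[] = "Content-Type: text/html\r\n\r\n";

  // Advance the checksum chunk by chunk, as sequential WriteData calls do.
  uLong running = crc32(0L, Z_NULL, 0);
  running = crc32(running, reinterpret_cast<const Bytef*>(part1),
                  static_cast<uInt>(std::strlen(part1)));
  running = crc32(running, reinterpret_cast<const Bytef*>(part2),
                  static_cast<uInt>(std::strlen(part2)));

  // Checksum the concatenated data in a single call.
  char whole[sizeof(part1) + sizeof(part2)];
  std::strcpy(whole, part1);
  std::strcat(whole, part2);
  const uLong at_once = crc32(crc32(0L, Z_NULL, 0),
                              reinterpret_cast<const Bytef*>(whole),
                              static_cast<uInt>(std::strlen(whole)));

  assert(running == at_once);  // The incremental CRC equals the one-shot CRC.
  return 0;
}
```

SimpleEntryImpl keeps the running value in crc32s_[] and the covered length in crc32s_end_offset_[]; once a write is non-sequential, the CRC is simply not verified on read.
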
| 904 if (stream_index == 1) | |
| 905 have_written_[0] = true; | |
| 903 | 906 |
| 904 scoped_ptr<int> result(new int()); | 907 scoped_ptr<int> result(new int()); |
| 905 Closure task = base::Bind(&SimpleSynchronousEntry::WriteData, | 908 Closure task = base::Bind(&SimpleSynchronousEntry::WriteData, |
| 906 base::Unretained(synchronous_entry_), | 909 base::Unretained(synchronous_entry_), |
| 907 SimpleSynchronousEntry::EntryOperationData( | 910 SimpleSynchronousEntry::EntryOperationData( |
| 908 stream_index, offset, buf_len, truncate), | 911 stream_index, offset, buf_len, truncate), |
| 909 make_scoped_refptr(buf), | 912 make_scoped_refptr(buf), |
| 910 entry_stat.get(), | 913 entry_stat.get(), |
| 911 result.get()); | 914 result.get()); |
| 912 Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete, | 915 Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete, |
| (...skipping 36 matching lines...) | |
| 949 MakeUninitialized(); | 952 MakeUninitialized(); |
| 950 return; | 953 return; |
| 951 } | 954 } |
| 952 // If out_entry is NULL, it means we already called ReturnEntryToCaller from | 955 // If out_entry is NULL, it means we already called ReturnEntryToCaller from |
| 953 // the optimistic Create case. | 956 // the optimistic Create case. |
| 954 if (out_entry) | 957 if (out_entry) |
| 955 ReturnEntryToCaller(out_entry); | 958 ReturnEntryToCaller(out_entry); |
| 956 | 959 |
| 957 state_ = STATE_READY; | 960 state_ = STATE_READY; |
| 958 synchronous_entry_ = in_results->sync_entry; | 961 synchronous_entry_ = in_results->sync_entry; |
| 962 if (in_results->stream_0_data) { | |
| 963 stream_0_data_ = in_results->stream_0_data; | |
| 964 // The crc was read in SimpleSynchronousEntry. | |
| 965 crc_check_state_[0] = CRC_CHECK_DONE; | |
| 966 crc32s_[0] = in_results->stream_0_crc32; | |
| 967 crc32s_end_offset_[0] = in_results->entry_stat.data_size(0); | |
| 968 } | |
| 959 if (key_.empty()) { | 969 if (key_.empty()) { |
| 960 SetKey(synchronous_entry_->key()); | 970 SetKey(synchronous_entry_->key()); |
| 961 } else { | 971 } else { |
| 962 // This should only be triggered when creating an entry. The key check in | 972 // This should only be triggered when creating an entry. The key check in |
| 963 // the open case is handled in SimpleBackendImpl. | 973 // the open case is handled in SimpleBackendImpl. |
| 964 DCHECK_EQ(key_, synchronous_entry_->key()); | 974 DCHECK_EQ(key_, synchronous_entry_->key()); |
| 965 } | 975 } |
| 966 UpdateDataFromEntryStat(in_results->entry_stat); | 976 UpdateDataFromEntryStat(in_results->entry_stat); |
| 967 SIMPLE_CACHE_UMA(TIMES, | 977 SIMPLE_CACHE_UMA(TIMES, |
| 968 "EntryCreationTime", cache_type_, | 978 "EntryCreationTime", cache_type_, |
| (...skipping 27 matching lines...) | |
| 996 completion_callback, *result)); | 1006 completion_callback, *result)); |
| 997 } | 1007 } |
| 998 RunNextOperationIfNeeded(); | 1008 RunNextOperationIfNeeded(); |
| 999 } | 1009 } |
| 1000 | 1010 |
| 1001 void SimpleEntryImpl::ReadOperationComplete( | 1011 void SimpleEntryImpl::ReadOperationComplete( |
| 1002 int stream_index, | 1012 int stream_index, |
| 1003 int offset, | 1013 int offset, |
| 1004 const CompletionCallback& completion_callback, | 1014 const CompletionCallback& completion_callback, |
| 1005 scoped_ptr<uint32> read_crc32, | 1015 scoped_ptr<uint32> read_crc32, |
| 1006 scoped_ptr<base::Time> last_used, | 1016 scoped_ptr<SimpleEntryStat> entry_stat, |
| 1007 scoped_ptr<int> result) { | 1017 scoped_ptr<int> result) { |
| 1008 DCHECK(io_thread_checker_.CalledOnValidThread()); | 1018 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 1009 DCHECK(synchronous_entry_); | 1019 DCHECK(synchronous_entry_); |
| 1010 DCHECK_EQ(STATE_IO_PENDING, state_); | 1020 DCHECK_EQ(STATE_IO_PENDING, state_); |
| 1011 DCHECK(read_crc32); | 1021 DCHECK(read_crc32); |
| 1012 DCHECK(result); | 1022 DCHECK(result); |
| 1013 | 1023 |
| 1014 if (*result > 0 && | 1024 if (*result > 0 && |
| 1015 crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) { | 1025 crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) { |
| 1016 crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END; | 1026 crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END; |
| (...skipping 15 matching lines...) Expand all Loading... | |
| 1032 // entry, one reader can be behind the other. In this case we compute | 1042 // entry, one reader can be behind the other. In this case we compute |
| 1033 // the crc as the most advanced reader progresses, and check it for | 1043 // the crc as the most advanced reader progresses, and check it for |
| 1034 // both readers as they read the last byte. | 1044 // both readers as they read the last byte. |
| 1035 | 1045 |
| 1036 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN); | 1046 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN); |
| 1037 | 1047 |
| 1038 scoped_ptr<int> new_result(new int()); | 1048 scoped_ptr<int> new_result(new int()); |
| 1039 Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord, | 1049 Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord, |
| 1040 base::Unretained(synchronous_entry_), | 1050 base::Unretained(synchronous_entry_), |
| 1041 stream_index, | 1051 stream_index, |
| 1042 data_size_[stream_index], | 1052 *entry_stat, |
| 1043 crc32s_[stream_index], | 1053 crc32s_[stream_index], |
| 1044 new_result.get()); | 1054 new_result.get()); |
| 1045 Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete, | 1055 Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete, |
| 1046 this, *result, stream_index, | 1056 this, *result, stream_index, |
| 1047 completion_callback, | 1057 completion_callback, |
| 1048 base::Passed(&new_result)); | 1058 base::Passed(&new_result)); |
| 1049 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); | 1059 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); |
| 1050 crc_check_state_[stream_index] = CRC_CHECK_DONE; | 1060 crc_check_state_[stream_index] = CRC_CHECK_DONE; |
| 1051 return; | 1061 return; |
| 1052 } | 1062 } |
| 1053 } | 1063 } |
| 1054 | 1064 |
| 1055 if (*result < 0) { | 1065 if (*result < 0) { |
| 1056 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE); | 1066 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE); |
| 1057 } else { | 1067 } else { |
| 1058 RecordReadResult(cache_type_, READ_RESULT_SUCCESS); | 1068 RecordReadResult(cache_type_, READ_RESULT_SUCCESS); |
| 1059 if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END && | 1069 if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END && |
| 1060 offset + *result == GetDataSize(stream_index)) { | 1070 offset + *result == GetDataSize(stream_index)) { |
| 1061 crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE; | 1071 crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE; |
| 1062 } | 1072 } |
| 1063 } | 1073 } |
| 1064 if (net_log_.IsLoggingAllEvents()) { | 1074 if (net_log_.IsLoggingAllEvents()) { |
| 1065 net_log_.AddEvent( | 1075 net_log_.AddEvent( |
| 1066 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, | 1076 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, |
| 1067 CreateNetLogReadWriteCompleteCallback(*result)); | 1077 CreateNetLogReadWriteCompleteCallback(*result)); |
| 1068 } | 1078 } |
| 1069 | 1079 |
| 1070 EntryOperationComplete( | 1080 EntryOperationComplete( |
| 1071 stream_index, | 1081 stream_index, completion_callback, *entry_stat, result.Pass()); |
| 1072 completion_callback, | |
| 1073 SimpleEntryStat(*last_used, last_modified_, data_size_), | |
| 1074 result.Pass()); | |
| 1075 } | 1082 } |
| 1076 | 1083 |
| 1077 void SimpleEntryImpl::WriteOperationComplete( | 1084 void SimpleEntryImpl::WriteOperationComplete( |
| 1078 int stream_index, | 1085 int stream_index, |
| 1079 const CompletionCallback& completion_callback, | 1086 const CompletionCallback& completion_callback, |
| 1080 scoped_ptr<SimpleEntryStat> entry_stat, | 1087 scoped_ptr<SimpleEntryStat> entry_stat, |
| 1081 scoped_ptr<int> result) { | 1088 scoped_ptr<int> result) { |
| 1082 if (*result >= 0) | 1089 if (*result >= 0) |
| 1083 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS); | 1090 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS); |
| 1084 else | 1091 else |
| (...skipping 66 matching lines...) | |
| 1151 MakeUninitialized(); | 1158 MakeUninitialized(); |
| 1152 RunNextOperationIfNeeded(); | 1159 RunNextOperationIfNeeded(); |
| 1153 } | 1160 } |
| 1154 | 1161 |
| 1155 void SimpleEntryImpl::UpdateDataFromEntryStat( | 1162 void SimpleEntryImpl::UpdateDataFromEntryStat( |
| 1156 const SimpleEntryStat& entry_stat) { | 1163 const SimpleEntryStat& entry_stat) { |
| 1157 DCHECK(io_thread_checker_.CalledOnValidThread()); | 1164 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 1158 DCHECK(synchronous_entry_); | 1165 DCHECK(synchronous_entry_); |
| 1159 DCHECK_EQ(STATE_READY, state_); | 1166 DCHECK_EQ(STATE_READY, state_); |
| 1160 | 1167 |
| 1161 last_used_ = entry_stat.last_used; | 1168 last_used_ = entry_stat.last_used(); |
| 1162 last_modified_ = entry_stat.last_modified; | 1169 last_modified_ = entry_stat.last_modified(); |
| 1163 for (int i = 0; i < kSimpleEntryFileCount; ++i) { | 1170 for (int i = 0; i < kSimpleEntryStreamCount; ++i) { |
| 1164 data_size_[i] = entry_stat.data_size[i]; | 1171 data_size_[i] = entry_stat.data_size(i); |
| 1165 } | 1172 } |
| 1166 if (!doomed_ && backend_.get()) | 1173 if (!doomed_ && backend_.get()) |
| 1167 backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage()); | 1174 backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage()); |
| 1168 } | 1175 } |
| 1169 | 1176 |
| 1170 int64 SimpleEntryImpl::GetDiskUsage() const { | 1177 int64 SimpleEntryImpl::GetDiskUsage() const { |
| 1171 int64 file_size = 0; | 1178 int64 file_size = 0; |
| 1172 for (int i = 0; i < kSimpleEntryFileCount; ++i) { | 1179 for (int i = 0; i < kSimpleEntryStreamCount; ++i) { |
| 1173 file_size += | 1180 file_size += |
| 1174 simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]); | 1181 simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]); |
| 1175 } | 1182 } |
| 1176 return file_size; | 1183 return file_size; |
| 1177 } | 1184 } |
| 1178 | 1185 |
| 1179 void SimpleEntryImpl::RecordReadIsParallelizable( | 1186 void SimpleEntryImpl::RecordReadIsParallelizable( |
| 1180 const SimpleEntryOperation& operation) const { | 1187 const SimpleEntryOperation& operation) const { |
| 1181 if (!executing_operation_) | 1188 if (!executing_operation_) |
| 1182 return; | 1189 return; |
| (...skipping 57 matching lines...) | |
| 1240 } else { | 1247 } else { |
| 1241 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE | 1248 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE |
| 1242 : WRITE_FOLLOWS_NON_CONFLICTING_WRITE; | 1249 : WRITE_FOLLOWS_NON_CONFLICTING_WRITE; |
| 1243 } | 1250 } |
| 1244 } | 1251 } |
| 1245 SIMPLE_CACHE_UMA(ENUMERATION, | 1252 SIMPLE_CACHE_UMA(ENUMERATION, |
| 1246 "WriteDependencyType", cache_type_, | 1253 "WriteDependencyType", cache_type_, |
| 1247 type, WRITE_DEPENDENCY_TYPE_MAX); | 1254 type, WRITE_DEPENDENCY_TYPE_MAX); |
| 1248 } | 1255 } |
| 1249 | 1256 |
| 1257 int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf, | |
| 1258 int offset, | |
| 1259 int buf_len) { | |
| 1260 if (buf_len < 0) { | |
| 1261 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE); | |
| 1262 return 0; | |
| 1263 } | |
| 1264 memcpy(buf->data(), stream_0_data_->data() + offset, buf_len); | |
| 1265 UpdateDataFromEntryStat( | |
| 1266 SimpleEntryStat(base::Time::Now(), last_modified_, data_size_)); | |
| 1267 RecordReadResult(cache_type_, READ_RESULT_SUCCESS); | |
| 1268 return buf_len; | |
| 1269 } | |
| 1270 | |
| 1271 int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf, | |
| 1272 int offset, | |
| 1273 int buf_len, | |
| 1274 bool truncate) { | |
| 1275 // Currently, stream 0 is only used for HTTP headers, and always writes them | |
| 1276 // with a single, truncating write. Detect these writes and record the size | |
| 1277 // changes of the headers. Also, support writes to stream 0 that have | |
| 1278 // different access patterns, as required by the API contract. | |
| 1279 // All other clients of the Simple Cache are encouraged to use stream 1. | |
| 1280 have_written_[0] = true; | |
| 1281 int data_size = GetDataSize(0); | |
| 1282 if (offset == 0 && truncate) { | |
| 1283 RecordHeaderSizeChange(cache_type_, data_size, buf_len); | |
| 1284 stream_0_data_->SetCapacity(buf_len); | |
| 1285 memcpy(stream_0_data_->data(), buf->data(), buf_len); | |
| 1286 data_size_[0] = buf_len; | |
| 1287 } else { | |
| 1288 RecordUnexpectedStream0Write(cache_type_); | |
| 1289 const int buffer_size = | |
| 1290 truncate ? offset + buf_len : std::max(offset + buf_len, data_size); | |
| 1291 stream_0_data_->SetCapacity(buffer_size); | |
| 1292 // If |stream_0_data_| was extended, the extension until offset needs to be | |
| 1293 // zero-filled. | |
| 1294 const int fill_size = offset <= data_size ? 0 : offset - data_size; | |
| 1295 if (fill_size > 0) | |
| 1296 memset(stream_0_data_->data() + data_size, 0, fill_size); | |
| 1297 if (buf) | |
| 1298 memcpy(stream_0_data_->data() + offset, buf->data(), buf_len); | |
| 1299 data_size_[0] = buffer_size; | |
| 1300 } | |
| 1301 base::Time modification_time = base::Time::Now(); | |
| 1302 AdvanceCrc(buf, offset, buf_len, 0); | |
| 1303 UpdateDataFromEntryStat( | |
| 1304 SimpleEntryStat(modification_time, modification_time, data_size_)); | |
| 1305 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS); | |
| 1306 return buf_len; | |
| 1307 } | |
| 1308 | |
| 1309 void SimpleEntryImpl::AdvanceCrc(net::IOBuffer* buffer, | |
| 1310 int offset, | |
| 1311 int length, | |
| 1312 int stream_index) { | |
| 1313 // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|) | |
| 1314 // if |offset == 0| or we have already computed the CRC for [0 .. offset). | |
| 1315 // We rely on most write operations being sequential, start to end to compute | |
| 1316 // the crc of the data. When we write to an entry and close without having | |
| 1317 // done a sequential write, we don't check the CRC on read. | |
| 1318 if (offset == 0 || crc32s_end_offset_[stream_index] == offset) { | |
| 1319 uint32 initial_crc = | |
| 1320 (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0); | |
| 1321 if (length > 0) { | |
| 1322 crc32s_[stream_index] = crc32( | |
| 1323 initial_crc, reinterpret_cast<const Bytef*>(buffer->data()), length); | |
| 1324 } | |
| 1325 crc32s_end_offset_[stream_index] = offset + length; | |
| 1326 } | |
| 1327 } | |
| 1328 | |
| 1250 } // namespace disk_cache | 1329 } // namespace disk_cache |
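
Aside (editor's illustration): the heart of this CL is that stream 0 (the HTTP headers) now lives in memory in `stream_0_data_`, is served directly by ReadStream0Data, and is only flushed to disk in CloseInternal. The sketch below mirrors SetStream0Data's buffer handling with a plain std::vector so the zero-fill rule for sparse writes is easy to see in isolation; `ToyStream0` is an invented name and this is a simplified illustration, not the Chromium code.

```cpp
// Simplified stand-in for SetStream0Data's buffer handling, using a
// std::vector<char> instead of net::GrowableIOBuffer. Invented names; for
// illustration only.
#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

class ToyStream0 {
 public:
  // Writes |data| at |offset|. A truncating write at offset 0 replaces the
  // whole buffer (the common "write the HTTP headers" case); any other write
  // grows the buffer as needed and zero-fills the gap between the old end of
  // the data and |offset|, matching what SetStream0Data does.
  int Write(int offset, const std::string& data, bool truncate) {
    const int len = static_cast<int>(data.size());
    if (offset == 0 && truncate) {
      buffer_.assign(data.begin(), data.end());
      return len;
    }
    const int old_size = static_cast<int>(buffer_.size());
    const int new_size =
        truncate ? offset + len : std::max(offset + len, old_size);
    buffer_.resize(new_size, '\0');  // resize() zero-fills any extension.
    std::copy(data.begin(), data.end(), buffer_.begin() + offset);
    return len;
  }

  int size() const { return static_cast<int>(buffer_.size()); }

 private:
  std::vector<char> buffer_;
};

int main() {
  ToyStream0 stream;
  stream.Write(0, "HTTP/1.1 200 OK", /*truncate=*/true);  // 15 bytes.
  stream.Write(20, "trailer", /*truncate=*/false);        // Bytes 15..19 are zero-filled.
  assert(stream.size() == 27);
  return 0;
}
```

The real SetStream0Data additionally records UMA histograms and advances the stream-0 CRC via AdvanceCrc, which the sketch omits.
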