| OLD | NEW |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/simple/simple_entry_impl.h" | 5 #include "net/disk_cache/simple/simple_entry_impl.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <cstring> | 8 #include <cstring> |
| 9 #include <vector> | 9 #include <vector> |
| 10 | 10 |
| (...skipping 156 matching lines...) |
| 167 path_(path), | 167 path_(path), |
| 168 entry_hash_(entry_hash), | 168 entry_hash_(entry_hash), |
| 169 use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS), | 169 use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS), |
| 170 last_used_(Time::Now()), | 170 last_used_(Time::Now()), |
| 171 last_modified_(last_used_), | 171 last_modified_(last_used_), |
| 172 open_count_(0), | 172 open_count_(0), |
| 173 doomed_(false), | 173 doomed_(false), |
| 174 state_(STATE_UNINITIALIZED), | 174 state_(STATE_UNINITIALIZED), |
| 175 synchronous_entry_(NULL), | 175 synchronous_entry_(NULL), |
| 176 net_log_(net::BoundNetLog::Make( | 176 net_log_(net::BoundNetLog::Make( |
| 177 net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)) { | 177 net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)), |
| 178 stream_0_data_(new net::GrowableIOBuffer()) { |
| 178 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_), | 179 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_), |
| 179 arrays_should_be_same_size); | 180 arrays_should_be_same_size); |
| 180 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_), | 181 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_), |
| 181 arrays_should_be_same_size); | 182 arrays_should_be_same_size); |
| 182 COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_), | 183 COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_), |
| 183 arrays_should_be_same_size); | 184 arrays_should_be_same_size); |
| 184 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_), | 185 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_), |
| 185 arrays_should_be_same_size); | 186 arrays_should_be_same_size); |
| 186 MakeUninitialized(); | 187 MakeUninitialized(); |
| 187 net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY, | 188 net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY, |
| (...skipping 142 matching lines...) |
| 330 int buf_len, | 331 int buf_len, |
| 331 const CompletionCallback& callback) { | 332 const CompletionCallback& callback) { |
| 332 DCHECK(io_thread_checker_.CalledOnValidThread()); | 333 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 333 | 334 |
| 334 if (net_log_.IsLoggingAllEvents()) { | 335 if (net_log_.IsLoggingAllEvents()) { |
| 335 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL, | 336 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL, |
| 336 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, | 337 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, |
| 337 false)); | 338 false)); |
| 338 } | 339 } |
| 339 | 340 |
| 340 if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || | 341 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount || |
| 341 buf_len < 0) { | 342 buf_len < 0) { |
| 342 if (net_log_.IsLoggingAllEvents()) { | 343 if (net_log_.IsLoggingAllEvents()) { |
| 343 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, | 344 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, |
| 344 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); | 345 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); |
| 345 } | 346 } |
| 346 | 347 |
| 347 RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT); | 348 RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT); |
| 348 return net::ERR_INVALID_ARGUMENT; | 349 return net::ERR_INVALID_ARGUMENT; |
| 349 } | 350 } |
| 350 if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) || | 351 if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) || |
| 351 offset < 0 || !buf_len)) { | 352 offset < 0 || !buf_len)) { |
| 352 if (net_log_.IsLoggingAllEvents()) { | 353 if (net_log_.IsLoggingAllEvents()) { |
| 353 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, | 354 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, |
| 354 CreateNetLogReadWriteCompleteCallback(0)); | 355 CreateNetLogReadWriteCompleteCallback(0)); |
| 355 } | 356 } |
| 356 | 357 |
| 357 RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN); | 358 RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN); |
| 358 return 0; | 359 return 0; |
| 359 } | 360 } |
| 360 | 361 |
| 362 // TODO(clamy): return immediately when reading from stream 0. |
| 363 |
| 361 // TODO(felipeg): Optimization: Add support for truly parallel read | 364 // TODO(felipeg): Optimization: Add support for truly parallel read |
| 362 // operations. | 365 // operations. |
| 363 bool alone_in_queue = | 366 bool alone_in_queue = |
| 364 pending_operations_.size() == 0 && state_ == STATE_READY; | 367 pending_operations_.size() == 0 && state_ == STATE_READY; |
| 365 pending_operations_.push(SimpleEntryOperation::ReadOperation( | 368 pending_operations_.push(SimpleEntryOperation::ReadOperation( |
| 366 this, stream_index, offset, buf_len, buf, callback, alone_in_queue)); | 369 this, stream_index, offset, buf_len, buf, callback, alone_in_queue)); |
| 367 RunNextOperationIfNeeded(); | 370 RunNextOperationIfNeeded(); |
| 368 return net::ERR_IO_PENDING; | 371 return net::ERR_IO_PENDING; |
| 369 } | 372 } |
| 370 | 373 |
| 371 int SimpleEntryImpl::WriteData(int stream_index, | 374 int SimpleEntryImpl::WriteData(int stream_index, |
| 372 int offset, | 375 int offset, |
| 373 net::IOBuffer* buf, | 376 net::IOBuffer* buf, |
| 374 int buf_len, | 377 int buf_len, |
| 375 const CompletionCallback& callback, | 378 const CompletionCallback& callback, |
| 376 bool truncate) { | 379 bool truncate) { |
| 377 DCHECK(io_thread_checker_.CalledOnValidThread()); | 380 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 378 | 381 |
| 379 if (net_log_.IsLoggingAllEvents()) { | 382 if (net_log_.IsLoggingAllEvents()) { |
| 380 net_log_.AddEvent( | 383 net_log_.AddEvent( |
| 381 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL, | 384 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL, |
| 382 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, | 385 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, |
| 383 truncate)); | 386 truncate)); |
| 384 } | 387 } |
| 385 | 388 |
| 386 if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 || | 389 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount || |
| 387 buf_len < 0) { | 390 offset < 0 || buf_len < 0) { |
| 388 if (net_log_.IsLoggingAllEvents()) { | 391 if (net_log_.IsLoggingAllEvents()) { |
| 389 net_log_.AddEvent( | 392 net_log_.AddEvent( |
| 390 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, | 393 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, |
| 391 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); | 394 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); |
| 392 } | 395 } |
| 393 RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT); | 396 RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT); |
| 394 return net::ERR_INVALID_ARGUMENT; | 397 return net::ERR_INVALID_ARGUMENT; |
| 395 } | 398 } |
| 396 if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) { | 399 if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) { |
| 397 if (net_log_.IsLoggingAllEvents()) { | 400 if (net_log_.IsLoggingAllEvents()) { |
| 398 net_log_.AddEvent( | 401 net_log_.AddEvent( |
| 399 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, | 402 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, |
| 400 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED)); | 403 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED)); |
| 401 } | 404 } |
| 402 RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE); | 405 RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE); |
| 403 return net::ERR_FAILED; | 406 return net::ERR_FAILED; |
| 404 } | 407 } |
| 405 ScopedOperationRunner operation_runner(this); | 408 ScopedOperationRunner operation_runner(this); |
| 406 | 409 |
| 407 // Currently, Simple Cache is only used for HTTP, which stores the headers in | 410 // Stream 0 data is kept in memory, so it can be written immediately if there are |
| 408 // stream 0 and always writes them with a single, truncating write. Detect | 411 // no IO operations pending. |
| 409 // these writes and record the size and size changes of the headers. Also, | 412 if (stream_index == 0 && state_ == STATE_READY && |
| 410 // note writes to stream 0 that violate those assumptions. | 413 pending_operations_.size() == 0) |
| 411 if (stream_index == 0) { | 414 return SetStream0Data(buf, offset, buf_len, truncate); |
| 412 if (offset == 0 && truncate) | |
| 413 RecordHeaderSizeChange(cache_type_, data_size_[0], buf_len); | |
| 414 else | |
| 415 RecordUnexpectedStream0Write(cache_type_); | |
| 416 } | |
| 417 | 415 |
| 418 // We can only do optimistic Write if there are no pending operations, so | 416 // We can only do optimistic Write if there are no pending operations, so |
| 419 // that we are sure that the next call to RunNextOperationIfNeeded will | 417 // that we are sure that the next call to RunNextOperationIfNeeded will |
| 420 // actually run the write operation that sets the stream size. It also | 418 // actually run the write operation that sets the stream size. It also |
| 421 // protects against previous possibly-conflicting writes that could be stacked | 419 // protects against previous possibly-conflicting writes that could be stacked |
| 422 // in the |pending_operations_|. We could optimize this for when we have | 420 // in the |pending_operations_|. We could optimize this for when we have |
| 423 // only read operations enqueued. | 421 // only read operations enqueued. |
| 424 const bool optimistic = | 422 const bool optimistic = |
| 425 (use_optimistic_operations_ && state_ == STATE_READY && | 423 (use_optimistic_operations_ && state_ == STATE_READY && |
| 426 pending_operations_.size() == 0); | 424 pending_operations_.size() == 0); |
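The gating comment above (new lines 416-421) reduces to a single predicate: an optimistic write is only allowed when optimistic operations are enabled, the entry is READY, and nothing is queued. A minimal standalone sketch of that predicate, with illustrative names rather than the real Chromium types:

```cpp
// Sketch of the optimistic-write gate described above: the write may be
// acknowledged optimistically only when optimistic operations are enabled,
// the entry is READY, and nothing is queued, so the enqueued write is
// guaranteed to run next and establish the stream size.
#include <cstdio>
#include <queue>

enum class State { kUninitialized, kIoPending, kReady, kFailure };
struct Operation {};  // Stand-in for SimpleEntryOperation.

bool CanWriteOptimistically(bool use_optimistic_operations,
                            State state,
                            const std::queue<Operation>& pending_operations) {
  return use_optimistic_operations && state == State::kReady &&
         pending_operations.empty();
}

int main() {
  std::queue<Operation> pending;
  std::printf("%d\n", CanWriteOptimistically(true, State::kReady, pending));  // 1
  pending.push(Operation());
  std::printf("%d\n", CanWriteOptimistically(true, State::kReady, pending));  // 0
}
```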
| (...skipping 260 matching lines...) |
| 687 DCHECK_EQ(STATE_UNINITIALIZED, state_); | 685 DCHECK_EQ(STATE_UNINITIALIZED, state_); |
| 688 DCHECK(!synchronous_entry_); | 686 DCHECK(!synchronous_entry_); |
| 689 | 687 |
| 690 state_ = STATE_IO_PENDING; | 688 state_ = STATE_IO_PENDING; |
| 691 | 689 |
| 692 // Since we don't know the correct values for |last_used_| and | 690 // Since we don't know the correct values for |last_used_| and |
| 693 // |last_modified_| yet, we make this approximation. | 691 // |last_modified_| yet, we make this approximation. |
| 694 last_used_ = last_modified_ = base::Time::Now(); | 692 last_used_ = last_modified_ = base::Time::Now(); |
| 695 | 693 |
| 696 // If creation succeeds, we should mark all streams to be saved on close. | 694 // If creation succeeds, we should mark all streams to be saved on close. |
| 697 for (int i = 0; i < kSimpleEntryFileCount; ++i) | 695 for (int i = 0; i < kSimpleEntryStreamCount; ++i) |
| 698 have_written_[i] = true; | 696 have_written_[i] = true; |
| 699 | 697 |
| 700 const base::TimeTicks start_time = base::TimeTicks::Now(); | 698 const base::TimeTicks start_time = base::TimeTicks::Now(); |
| 701 scoped_ptr<SimpleEntryCreationResults> results( | 699 scoped_ptr<SimpleEntryCreationResults> results( |
| 702 new SimpleEntryCreationResults( | 700 new SimpleEntryCreationResults( |
| 703 SimpleEntryStat(last_used_, last_modified_, data_size_))); | 701 SimpleEntryStat(last_used_, last_modified_, data_size_))); |
| 704 Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, | 702 Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, |
| 705 cache_type_, | 703 cache_type_, |
| 706 path_, | 704 path_, |
| 707 key_, | 705 key_, |
| (...skipping 14 matching lines...) |
| 722 DCHECK(io_thread_checker_.CalledOnValidThread()); | 720 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 723 typedef SimpleSynchronousEntry::CRCRecord CRCRecord; | 721 typedef SimpleSynchronousEntry::CRCRecord CRCRecord; |
| 724 scoped_ptr<std::vector<CRCRecord> > | 722 scoped_ptr<std::vector<CRCRecord> > |
| 725 crc32s_to_write(new std::vector<CRCRecord>()); | 723 crc32s_to_write(new std::vector<CRCRecord>()); |
| 726 | 724 |
| 727 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN); | 725 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN); |
| 728 | 726 |
| 729 if (state_ == STATE_READY) { | 727 if (state_ == STATE_READY) { |
| 730 DCHECK(synchronous_entry_); | 728 DCHECK(synchronous_entry_); |
| 731 state_ = STATE_IO_PENDING; | 729 state_ = STATE_IO_PENDING; |
| 732 for (int i = 0; i < kSimpleEntryFileCount; ++i) { | 730 for (int i = 0; i < kSimpleEntryStreamCount; ++i) { |
| 733 if (have_written_[i]) { | 731 if (have_written_[i]) { |
| 734 if (GetDataSize(i) == crc32s_end_offset_[i]) { | 732 if (GetDataSize(i) == crc32s_end_offset_[i]) { |
| 735 int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i]; | 733 int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i]; |
| 736 crc32s_to_write->push_back(CRCRecord(i, true, crc)); | 734 crc32s_to_write->push_back(CRCRecord(i, true, crc)); |
| 737 } else { | 735 } else { |
| 738 crc32s_to_write->push_back(CRCRecord(i, false, 0)); | 736 crc32s_to_write->push_back(CRCRecord(i, false, 0)); |
| 739 } | 737 } |
| 740 } | 738 } |
| 741 } | 739 } |
| 742 } else { | 740 } else { |
| 743 DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_); | 741 DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_); |
| 744 } | 742 } |
| 745 | 743 |
| 746 if (synchronous_entry_) { | 744 if (synchronous_entry_) { |
| 747 Closure task = | 745 Closure task = |
| 748 base::Bind(&SimpleSynchronousEntry::Close, | 746 base::Bind(&SimpleSynchronousEntry::Close, |
| 749 base::Unretained(synchronous_entry_), | 747 base::Unretained(synchronous_entry_), |
| 750 SimpleEntryStat(last_used_, last_modified_, data_size_), | 748 SimpleEntryStat(last_used_, last_modified_, data_size_), |
| 751 base::Passed(&crc32s_to_write)); | 749 base::Passed(&crc32s_to_write), |
| 750 stream_0_data_); |
| 752 Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this); | 751 Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this); |
| 753 synchronous_entry_ = NULL; | 752 synchronous_entry_ = NULL; |
| 754 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); | 753 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); |
| 755 | 754 |
| 756 for (int i = 0; i < kSimpleEntryFileCount; ++i) { | 755 for (int i = 0; i < kSimpleEntryStreamCount; ++i) { |
| 757 if (!have_written_[i]) { | 756 if (!have_written_[i]) { |
| 758 SIMPLE_CACHE_UMA(ENUMERATION, | 757 SIMPLE_CACHE_UMA(ENUMERATION, |
| 759 "CheckCRCResult", cache_type_, | 758 "CheckCRCResult", cache_type_, |
| 760 crc_check_state_[i], CRC_CHECK_MAX); | 759 crc_check_state_[i], CRC_CHECK_MAX); |
| 761 } | 760 } |
| 762 } | 761 } |
| 763 } else { | 762 } else { |
| 764 CloseOperationComplete(); | 763 CloseOperationComplete(); |
| 765 } | 764 } |
| 766 } | 765 } |
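The CRC bookkeeping in CloseInternal above follows one rule per written stream: store a checksum in the EOF record only if the running CRC covers the whole stream, otherwise write the record without one so the check is skipped on the next open. A sketch of that rule under assumed names (builds against zlib, which the entry code already uses):

```cpp
// Close-time rule from CloseInternal, sketched with assumed names: a CRC is
// stored only when sequential writes covered the entire stream; an empty
// stream gets the CRC of zero bytes.
#include <cstdint>
#include <cstdio>
#include <zlib.h>

struct CrcRecord {
  int stream;
  bool has_crc32;
  uint32_t crc;
};

CrcRecord MakeCloseRecord(int stream, int data_size, int crc_end_offset,
                          uint32_t running_crc) {
  if (data_size == crc_end_offset) {
    uint32_t crc = data_size == 0 ? static_cast<uint32_t>(crc32(0, Z_NULL, 0))
                                  : running_crc;
    return {stream, true, crc};
  }
  return {stream, false, 0};  // Writes were not sequential: no checksum.
}

int main() {
  CrcRecord covered = MakeCloseRecord(1, 100, 100, 0xDEADBEEFu);  // has_crc32 == true
  CrcRecord partial = MakeCloseRecord(1, 100, 60, 0xDEADBEEFu);   // has_crc32 == false
  std::printf("%d %d\n", covered.has_crc32, partial.has_crc32);
}
```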
| (...skipping 34 matching lines...) |
| 801 RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN); | 800 RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN); |
| 802 // If there is nothing to read, we bail out before setting state_ to | 801 // If there is nothing to read, we bail out before setting state_ to |
| 803 // STATE_IO_PENDING. | 802 // STATE_IO_PENDING. |
| 804 if (!callback.is_null()) | 803 if (!callback.is_null()) |
| 805 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0)); | 804 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0)); |
| 806 return; | 805 return; |
| 807 } | 806 } |
| 808 | 807 |
| 809 buf_len = std::min(buf_len, GetDataSize(stream_index) - offset); | 808 buf_len = std::min(buf_len, GetDataSize(stream_index) - offset); |
| 810 | 809 |
| 810 // Since stream 0 data is kept in memory, it is read immediately. |
| 811 if (stream_index == 0) { |
| 812 int ret_value = ReadStream0Data(buf, offset, buf_len); |
| 813 if (!callback.is_null()) { |
| 814 MessageLoopProxy::current()->PostTask(FROM_HERE, |
| 815 base::Bind(callback, ret_value)); |
| 816 } |
| 817 return; |
| 818 } |
| 819 |
| 811 state_ = STATE_IO_PENDING; | 820 state_ = STATE_IO_PENDING; |
| 812 if (!doomed_ && backend_.get()) | 821 if (!doomed_ && backend_.get()) |
| 813 backend_->index()->UseIfExists(entry_hash_); | 822 backend_->index()->UseIfExists(entry_hash_); |
| 814 | 823 |
| 815 scoped_ptr<uint32> read_crc32(new uint32()); | 824 scoped_ptr<uint32> read_crc32(new uint32()); |
| 816 scoped_ptr<int> result(new int()); | 825 scoped_ptr<int> result(new int()); |
| 817 scoped_ptr<base::Time> last_used(new base::Time()); | 826 scoped_ptr<SimpleEntryStat> entry_stat( |
| 827 new SimpleEntryStat(last_used_, last_modified_, data_size_)); |
| 818 Closure task = base::Bind( | 828 Closure task = base::Bind( |
| 819 &SimpleSynchronousEntry::ReadData, | 829 &SimpleSynchronousEntry::ReadData, |
| 820 base::Unretained(synchronous_entry_), | 830 base::Unretained(synchronous_entry_), |
| 821 SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len), | 831 SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len), |
| 822 make_scoped_refptr(buf), | 832 make_scoped_refptr(buf), |
| 823 read_crc32.get(), | 833 read_crc32.get(), |
| 824 last_used.get(), | 834 entry_stat.get(), |
| 825 result.get()); | 835 result.get()); |
| 826 Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, | 836 Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, |
| 827 this, | 837 this, |
| 828 stream_index, | 838 stream_index, |
| 829 offset, | 839 offset, |
| 830 callback, | 840 callback, |
| 831 base::Passed(&read_crc32), | 841 base::Passed(&read_crc32), |
| 832 base::Passed(&last_used), | 842 base::Passed(&entry_stat), |
| 833 base::Passed(&result)); | 843 base::Passed(&result)); |
| 834 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); | 844 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); |
| 835 } | 845 } |
| 836 | 846 |
| 837 void SimpleEntryImpl::WriteDataInternal(int stream_index, | 847 void SimpleEntryImpl::WriteDataInternal(int stream_index, |
| 838 int offset, | 848 int offset, |
| 839 net::IOBuffer* buf, | 849 net::IOBuffer* buf, |
| 840 int buf_len, | 850 int buf_len, |
| 841 const CompletionCallback& callback, | 851 const CompletionCallback& callback, |
| 842 bool truncate) { | 852 bool truncate) { |
| (...skipping 16 matching lines...) |
| 859 } | 869 } |
| 860 if (!callback.is_null()) { | 870 if (!callback.is_null()) { |
| 861 MessageLoopProxy::current()->PostTask( | 871 MessageLoopProxy::current()->PostTask( |
| 862 FROM_HERE, base::Bind(callback, net::ERR_FAILED)); | 872 FROM_HERE, base::Bind(callback, net::ERR_FAILED)); |
| 863 } | 873 } |
| 864 // |this| may be destroyed after return here. | 874 // |this| may be destroyed after return here. |
| 865 return; | 875 return; |
| 866 } | 876 } |
| 867 | 877 |
| 868 DCHECK_EQ(STATE_READY, state_); | 878 DCHECK_EQ(STATE_READY, state_); |
| 879 |
| 880 // Since stream 0 data is kept in memory, it will be written immediately. |
| 881 if (stream_index == 0) { |
| 882 int ret_value = SetStream0Data(buf, offset, buf_len, truncate); |
| 883 if (!callback.is_null()) { |
| 884 MessageLoopProxy::current()->PostTask(FROM_HERE, |
| 885 base::Bind(callback, ret_value)); |
| 886 } |
| 887 return; |
| 888 } |
| 889 |
| 869 state_ = STATE_IO_PENDING; | 890 state_ = STATE_IO_PENDING; |
| 870 if (!doomed_ && backend_.get()) | 891 if (!doomed_ && backend_.get()) |
| 871 backend_->index()->UseIfExists(entry_hash_); | 892 backend_->index()->UseIfExists(entry_hash_); |
| 872 // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|) | 893 |
| 873 // if |offset == 0| or we have already computed the CRC for [0 .. offset). | 894 AdvanceCrc(buf, offset, buf_len, stream_index); |
| 874 // We rely on most write operations being sequential, start to end to compute | |
| 875 // the crc of the data. When we write to an entry and close without having | |
| 876 // done a sequential write, we don't check the CRC on read. | |
| 877 if (offset == 0 || crc32s_end_offset_[stream_index] == offset) { | |
| 878 uint32 initial_crc = (offset != 0) ? crc32s_[stream_index] | |
| 879 : crc32(0, Z_NULL, 0); | |
| 880 if (buf_len > 0) { | |
| 881 crc32s_[stream_index] = crc32(initial_crc, | |
| 882 reinterpret_cast<const Bytef*>(buf->data()), | |
| 883 buf_len); | |
| 884 } | |
| 885 crc32s_end_offset_[stream_index] = offset + buf_len; | |
| 886 } | |
| 887 | 895 |
| 888 // |entry_stat| needs to be initialized before modifying |data_size_|. | 896 // |entry_stat| needs to be initialized before modifying |data_size_|. |
| 889 scoped_ptr<SimpleEntryStat> entry_stat( | 897 scoped_ptr<SimpleEntryStat> entry_stat( |
| 890 new SimpleEntryStat(last_used_, last_modified_, data_size_)); | 898 new SimpleEntryStat(last_used_, last_modified_, data_size_)); |
| 891 if (truncate) { | 899 if (truncate) { |
| 892 data_size_[stream_index] = offset + buf_len; | 900 data_size_[stream_index] = offset + buf_len; |
| 893 } else { | 901 } else { |
| 894 data_size_[stream_index] = std::max(offset + buf_len, | 902 data_size_[stream_index] = std::max(offset + buf_len, |
| 895 GetDataSize(stream_index)); | 903 GetDataSize(stream_index)); |
| 896 } | 904 } |
| 897 | 905 |
| 898 // Since we don't know the correct values for |last_used_| and | 906 // Since we don't know the correct values for |last_used_| and |
| 899 // |last_modified_| yet, we make this approximation. | 907 // |last_modified_| yet, we make this approximation. |
| 900 last_used_ = last_modified_ = base::Time::Now(); | 908 last_used_ = last_modified_ = base::Time::Now(); |
| 901 | 909 |
| 902 have_written_[stream_index] = true; | 910 have_written_[stream_index] = true; |
| 911 // Writing on stream 1 affects the placement of stream 0 in the file, so the |
| 912 // EOF record will have to be rewritten. |
| 913 if (stream_index == 1) |
| 914 have_written_[0] = true; |
| 903 | 915 |
| 904 scoped_ptr<int> result(new int()); | 916 scoped_ptr<int> result(new int()); |
| 905 Closure task = base::Bind(&SimpleSynchronousEntry::WriteData, | 917 Closure task = base::Bind(&SimpleSynchronousEntry::WriteData, |
| 906 base::Unretained(synchronous_entry_), | 918 base::Unretained(synchronous_entry_), |
| 907 SimpleSynchronousEntry::EntryOperationData( | 919 SimpleSynchronousEntry::EntryOperationData( |
| 908 stream_index, offset, buf_len, truncate), | 920 stream_index, offset, buf_len, truncate), |
| 909 make_scoped_refptr(buf), | 921 make_scoped_refptr(buf), |
| 910 entry_stat.get(), | 922 entry_stat.get(), |
| 911 result.get()); | 923 result.get()); |
| 912 Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete, | 924 Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete, |
| (...skipping 36 matching lines...) |
| 949 MakeUninitialized(); | 961 MakeUninitialized(); |
| 950 return; | 962 return; |
| 951 } | 963 } |
| 952 // If out_entry is NULL, it means we already called ReturnEntryToCaller from | 964 // If out_entry is NULL, it means we already called ReturnEntryToCaller from |
| 953 // the optimistic Create case. | 965 // the optimistic Create case. |
| 954 if (out_entry) | 966 if (out_entry) |
| 955 ReturnEntryToCaller(out_entry); | 967 ReturnEntryToCaller(out_entry); |
| 956 | 968 |
| 957 state_ = STATE_READY; | 969 state_ = STATE_READY; |
| 958 synchronous_entry_ = in_results->sync_entry; | 970 synchronous_entry_ = in_results->sync_entry; |
| 971 if (in_results->stream_0_data) { |
| 972 stream_0_data_ = in_results->stream_0_data; |
| 973 // The crc was read in SimpleSynchronousEntry. |
| 974 crc_check_state_[0] = CRC_CHECK_DONE; |
| 975 crc32s_[0] = in_results->stream_0_crc32; |
| 976 crc32s_end_offset_[0] = in_results->entry_stat.data_size(0); |
| 977 } |
| 959 if (key_.empty()) { | 978 if (key_.empty()) { |
| 960 SetKey(synchronous_entry_->key()); | 979 SetKey(synchronous_entry_->key()); |
| 961 } else { | 980 } else { |
| 962 // This should only be triggered when creating an entry. The key check in | 981 // This should only be triggered when creating an entry. The key check in |
| 963 // the open case is handled in SimpleBackendImpl. | 982 // the open case is handled in SimpleBackendImpl. |
| 964 DCHECK_EQ(key_, synchronous_entry_->key()); | 983 DCHECK_EQ(key_, synchronous_entry_->key()); |
| 965 } | 984 } |
| 966 UpdateDataFromEntryStat(in_results->entry_stat); | 985 UpdateDataFromEntryStat(in_results->entry_stat); |
| 967 SIMPLE_CACHE_UMA(TIMES, | 986 SIMPLE_CACHE_UMA(TIMES, |
| 968 "EntryCreationTime", cache_type_, | 987 "EntryCreationTime", cache_type_, |
| (...skipping 27 matching lines...) |
| 996 completion_callback, *result)); | 1015 completion_callback, *result)); |
| 997 } | 1016 } |
| 998 RunNextOperationIfNeeded(); | 1017 RunNextOperationIfNeeded(); |
| 999 } | 1018 } |
| 1000 | 1019 |
| 1001 void SimpleEntryImpl::ReadOperationComplete( | 1020 void SimpleEntryImpl::ReadOperationComplete( |
| 1002 int stream_index, | 1021 int stream_index, |
| 1003 int offset, | 1022 int offset, |
| 1004 const CompletionCallback& completion_callback, | 1023 const CompletionCallback& completion_callback, |
| 1005 scoped_ptr<uint32> read_crc32, | 1024 scoped_ptr<uint32> read_crc32, |
| 1006 scoped_ptr<base::Time> last_used, | 1025 scoped_ptr<SimpleEntryStat> entry_stat, |
| 1007 scoped_ptr<int> result) { | 1026 scoped_ptr<int> result) { |
| 1008 DCHECK(io_thread_checker_.CalledOnValidThread()); | 1027 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 1009 DCHECK(synchronous_entry_); | 1028 DCHECK(synchronous_entry_); |
| 1010 DCHECK_EQ(STATE_IO_PENDING, state_); | 1029 DCHECK_EQ(STATE_IO_PENDING, state_); |
| 1011 DCHECK(read_crc32); | 1030 DCHECK(read_crc32); |
| 1012 DCHECK(result); | 1031 DCHECK(result); |
| 1013 | 1032 |
| 1014 if (*result > 0 && | 1033 if (*result > 0 && |
| 1015 crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) { | 1034 crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) { |
| 1016 crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END; | 1035 crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END; |
| (...skipping 15 matching lines...) Expand all Loading... |
| 1032 // entry, one reader can be behind the other. In this case we compute | 1051 // entry, one reader can be behind the other. In this case we compute |
| 1033 // the crc as the most advanced reader progresses, and check it for | 1052 // the crc as the most advanced reader progresses, and check it for |
| 1034 // both readers as they read the last byte. | 1053 // both readers as they read the last byte. |
| 1035 | 1054 |
| 1036 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN); | 1055 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN); |
| 1037 | 1056 |
| 1038 scoped_ptr<int> new_result(new int()); | 1057 scoped_ptr<int> new_result(new int()); |
| 1039 Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord, | 1058 Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord, |
| 1040 base::Unretained(synchronous_entry_), | 1059 base::Unretained(synchronous_entry_), |
| 1041 stream_index, | 1060 stream_index, |
| 1042 data_size_[stream_index], | 1061 *entry_stat, |
| 1043 crc32s_[stream_index], | 1062 crc32s_[stream_index], |
| 1044 new_result.get()); | 1063 new_result.get()); |
| 1045 Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete, | 1064 Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete, |
| 1046 this, *result, stream_index, | 1065 this, *result, stream_index, |
| 1047 completion_callback, | 1066 completion_callback, |
| 1048 base::Passed(&new_result)); | 1067 base::Passed(&new_result)); |
| 1049 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); | 1068 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); |
| 1050 crc_check_state_[stream_index] = CRC_CHECK_DONE; | 1069 crc_check_state_[stream_index] = CRC_CHECK_DONE; |
| 1051 return; | 1070 return; |
| 1052 } | 1071 } |
| 1053 } | 1072 } |
| 1054 | 1073 |
| 1055 if (*result < 0) { | 1074 if (*result < 0) { |
| 1056 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE); | 1075 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE); |
| 1057 } else { | 1076 } else { |
| 1058 RecordReadResult(cache_type_, READ_RESULT_SUCCESS); | 1077 RecordReadResult(cache_type_, READ_RESULT_SUCCESS); |
| 1059 if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END && | 1078 if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END && |
| 1060 offset + *result == GetDataSize(stream_index)) { | 1079 offset + *result == GetDataSize(stream_index)) { |
| 1061 crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE; | 1080 crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE; |
| 1062 } | 1081 } |
| 1063 } | 1082 } |
| 1064 if (net_log_.IsLoggingAllEvents()) { | 1083 if (net_log_.IsLoggingAllEvents()) { |
| 1065 net_log_.AddEvent( | 1084 net_log_.AddEvent( |
| 1066 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, | 1085 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, |
| 1067 CreateNetLogReadWriteCompleteCallback(*result)); | 1086 CreateNetLogReadWriteCompleteCallback(*result)); |
| 1068 } | 1087 } |
| 1069 | 1088 |
| 1070 EntryOperationComplete( | 1089 EntryOperationComplete( |
| 1071 stream_index, | 1090 stream_index, completion_callback, *entry_stat, result.Pass()); |
| 1072 completion_callback, | |
| 1073 SimpleEntryStat(*last_used, last_modified_, data_size_), | |
| 1074 result.Pass()); | |
| 1075 } | 1091 } |
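ReadOperationComplete above only compares checksums once a reader reaches the last byte of a stream, so the running CRC has to be advanced as sequential reads progress. A standalone sketch of that read-side check with plain zlib and illustrative names (not the SimpleSynchronousEntry API):

```cpp
// Read-side CRC check, sketched: advance the running CRC over sequential
// reads and compare it with the stored EOF value only when the final byte
// of the stream has been read.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <zlib.h>

struct StreamCheck {
  uint32_t running_crc = static_cast<uint32_t>(crc32(0, Z_NULL, 0));
  int end_offset = 0;  // How far into the stream the running CRC covers.
};

// Returns true once the whole stream has been read and the CRC matched.
bool AdvanceAndMaybeVerify(StreamCheck* check, const char* chunk, int offset,
                           int len, int stream_size, uint32_t stored_crc) {
  if (offset != check->end_offset)
    return false;  // Non-sequential read: stop tracking the checksum.
  check->running_crc = static_cast<uint32_t>(crc32(
      check->running_crc, reinterpret_cast<const Bytef*>(chunk), len));
  check->end_offset = offset + len;
  return check->end_offset == stream_size && check->running_crc == stored_crc;
}

int main() {
  const char data[] = "cached response body";
  const int size = static_cast<int>(std::strlen(data));
  const uint32_t stored = static_cast<uint32_t>(
      crc32(crc32(0, Z_NULL, 0), reinterpret_cast<const Bytef*>(data), size));

  StreamCheck check;
  assert(!AdvanceAndMaybeVerify(&check, data, 0, 10, size, stored));
  assert(AdvanceAndMaybeVerify(&check, data + 10, 10, size - 10, size, stored));
}
```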
| 1076 | 1092 |
| 1077 void SimpleEntryImpl::WriteOperationComplete( | 1093 void SimpleEntryImpl::WriteOperationComplete( |
| 1078 int stream_index, | 1094 int stream_index, |
| 1079 const CompletionCallback& completion_callback, | 1095 const CompletionCallback& completion_callback, |
| 1080 scoped_ptr<SimpleEntryStat> entry_stat, | 1096 scoped_ptr<SimpleEntryStat> entry_stat, |
| 1081 scoped_ptr<int> result) { | 1097 scoped_ptr<int> result) { |
| 1082 if (*result >= 0) | 1098 if (*result >= 0) |
| 1083 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS); | 1099 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS); |
| 1084 else | 1100 else |
| (...skipping 66 matching lines...) |
| 1151 MakeUninitialized(); | 1167 MakeUninitialized(); |
| 1152 RunNextOperationIfNeeded(); | 1168 RunNextOperationIfNeeded(); |
| 1153 } | 1169 } |
| 1154 | 1170 |
| 1155 void SimpleEntryImpl::UpdateDataFromEntryStat( | 1171 void SimpleEntryImpl::UpdateDataFromEntryStat( |
| 1156 const SimpleEntryStat& entry_stat) { | 1172 const SimpleEntryStat& entry_stat) { |
| 1157 DCHECK(io_thread_checker_.CalledOnValidThread()); | 1173 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 1158 DCHECK(synchronous_entry_); | 1174 DCHECK(synchronous_entry_); |
| 1159 DCHECK_EQ(STATE_READY, state_); | 1175 DCHECK_EQ(STATE_READY, state_); |
| 1160 | 1176 |
| 1161 last_used_ = entry_stat.last_used; | 1177 last_used_ = entry_stat.last_used(); |
| 1162 last_modified_ = entry_stat.last_modified; | 1178 last_modified_ = entry_stat.last_modified(); |
| 1163 for (int i = 0; i < kSimpleEntryFileCount; ++i) { | 1179 for (int i = 0; i < kSimpleEntryStreamCount; ++i) { |
| 1164 data_size_[i] = entry_stat.data_size[i]; | 1180 data_size_[i] = entry_stat.data_size(i); |
| 1165 } | 1181 } |
| 1166 if (!doomed_ && backend_.get()) | 1182 if (!doomed_ && backend_.get()) |
| 1167 backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage()); | 1183 backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage()); |
| 1168 } | 1184 } |
| 1169 | 1185 |
| 1170 int64 SimpleEntryImpl::GetDiskUsage() const { | 1186 int64 SimpleEntryImpl::GetDiskUsage() const { |
| 1171 int64 file_size = 0; | 1187 int64 file_size = 0; |
| 1172 for (int i = 0; i < kSimpleEntryFileCount; ++i) { | 1188 for (int i = 0; i < kSimpleEntryStreamCount; ++i) { |
| 1173 file_size += | 1189 file_size += |
| 1174 simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]); | 1190 simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]); |
| 1175 } | 1191 } |
| 1176 return file_size; | 1192 return file_size; |
| 1177 } | 1193 } |
| 1178 | 1194 |
| 1179 void SimpleEntryImpl::RecordReadIsParallelizable( | 1195 void SimpleEntryImpl::RecordReadIsParallelizable( |
| 1180 const SimpleEntryOperation& operation) const { | 1196 const SimpleEntryOperation& operation) const { |
| 1181 if (!executing_operation_) | 1197 if (!executing_operation_) |
| 1182 return; | 1198 return; |
| (...skipping 57 matching lines...) |
| 1240 } else { | 1256 } else { |
| 1241 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE | 1257 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE |
| 1242 : WRITE_FOLLOWS_NON_CONFLICTING_WRITE; | 1258 : WRITE_FOLLOWS_NON_CONFLICTING_WRITE; |
| 1243 } | 1259 } |
| 1244 } | 1260 } |
| 1245 SIMPLE_CACHE_UMA(ENUMERATION, | 1261 SIMPLE_CACHE_UMA(ENUMERATION, |
| 1246 "WriteDependencyType", cache_type_, | 1262 "WriteDependencyType", cache_type_, |
| 1247 type, WRITE_DEPENDENCY_TYPE_MAX); | 1263 type, WRITE_DEPENDENCY_TYPE_MAX); |
| 1248 } | 1264 } |
| 1249 | 1265 |
| 1266 int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf, |
| 1267 int offset, |
| 1268 int buf_len) { |
| 1269 if (buf_len < 0) { |
| 1270 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE); |
| 1271 return 0; |
| 1272 } |
| 1273 memcpy(buf->data(), stream_0_data_->data() + offset, buf_len); |
| 1274 UpdateDataFromEntryStat( |
| 1275 SimpleEntryStat(base::Time::Now(), last_modified_, data_size_)); |
| 1276 RecordReadResult(cache_type_, READ_RESULT_SUCCESS); |
| 1277 return buf_len; |
| 1278 } |
| 1279 |
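ReadStream0Data above can get away with a bare memcpy because ReadDataInternal has already rejected bad offsets and clamped |buf_len| to the remaining data before dispatching here. A sketch of that in-memory read with a std::string standing in for net::GrowableIOBuffer (illustrative names only):

```cpp
// In-memory stream 0 read, sketched: the caller guarantees 0 <= offset and
// offset + buf_len <= stream size, so the read is a single copy.
#include <cassert>
#include <cstring>
#include <string>

int ReadStream0(const std::string& stream0, int offset, int buf_len,
                char* out) {
  if (buf_len < 0)
    return 0;
  std::memcpy(out, stream0.data() + offset, buf_len);
  return buf_len;  // The byte count doubles as the completion value.
}

int main() {
  const std::string stream0 = "HTTP/1.1 200 OK";
  char buf[16] = {};
  const int n = ReadStream0(stream0, 9, 6, buf);  // Copies "200 OK".
  assert(n == 6 && std::memcmp(buf, "200 OK", 6) == 0);
}
```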
| 1280 int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf, |
| 1281 int offset, |
| 1282 int buf_len, |
| 1283 bool truncate) { |
| 1284 // Currently, stream 0 is only used for HTTP headers, which are always written |
| 1285 // with a single, truncating write. Detect these writes and record the size |
| 1286 // changes of the headers. Also, support writes to stream 0 that have |
| 1287 // different access patterns, as required by the API contract. |
| 1288 // All other clients of the Simple Cache are encouraged to use stream 1. |
| 1289 have_written_[0] = true; |
| 1290 int data_size = GetDataSize(0); |
| 1291 if (offset == 0 && truncate) { |
| 1292 RecordHeaderSizeChange(cache_type_, data_size, buf_len); |
| 1293 stream_0_data_->SetCapacity(buf_len); |
| 1294 memcpy(stream_0_data_->data(), buf->data(), buf_len); |
| 1295 data_size_[0] = buf_len; |
| 1296 } else { |
| 1297 RecordUnexpectedStream0Write(cache_type_); |
| 1298 const int buffer_size = |
| 1299 truncate ? offset + buf_len : std::max(offset + buf_len, data_size); |
| 1300 stream_0_data_->SetCapacity(buffer_size); |
| 1301 // If |stream_0_data_| was extended, the extension until offset needs to be |
| 1302 // zero-filled. |
| 1303 const int fill_size = offset <= data_size ? 0 : offset - data_size; |
| 1304 if (fill_size > 0) |
| 1305 memset(stream_0_data_->data() + data_size, 0, fill_size); |
| 1306 if (buf) |
| 1307 memcpy(stream_0_data_->data() + offset, buf->data(), buf_len); |
| 1308 data_size_[0] = buffer_size; |
| 1309 } |
| 1310 base::Time modification_time = base::Time::Now(); |
| 1311 AdvanceCrc(buf, offset, buf_len, 0); |
| 1312 UpdateDataFromEntryStat( |
| 1313 SimpleEntryStat(modification_time, modification_time, data_size_)); |
| 1314 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS); |
| 1315 return buf_len; |
| 1316 } |
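SetStream0Data above distinguishes the expected truncating rewrite at offset 0 from the fallback path that grows the buffer, zero-fills any gap before |offset|, and copies in place. Both size computations collapse into one expression in this sketch, which uses std::string in place of net::GrowableIOBuffer (illustrative, not the real API):

```cpp
// Stream 0 write semantics, sketched: truncate resizes the buffer to
// offset + buf_len, a non-truncating write never shrinks it, and any gap
// created before |offset| is zero-filled.
#include <algorithm>
#include <cassert>
#include <string>

void WriteStream0(std::string* stream0, int offset, const std::string& data,
                  bool truncate) {
  const int data_size = static_cast<int>(stream0->size());
  const int buf_len = static_cast<int>(data.size());
  const int new_size =
      truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
  stream0->resize(new_size, '\0');  // Zero-fills anything past the old end.
  std::copy(data.begin(), data.end(), stream0->begin() + offset);
}

int main() {
  std::string stream0 = "HTTP/1.1 200 OK";
  // The common case: a single truncating write at offset 0 replaces it all.
  WriteStream0(&stream0, 0, "HTTP/1.1 304 Not Modified", true);
  assert(stream0 == "HTTP/1.1 304 Not Modified");
  // An unexpected write past the end zero-fills the gap up to |offset|.
  WriteStream0(&stream0, 30, "X", false);
  assert(stream0.size() == 31u);
}
```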
| 1317 |
| 1318 void SimpleEntryImpl::AdvanceCrc(net::IOBuffer* buffer, |
| 1319 int offset, |
| 1320 int length, |
| 1321 int stream_index) { |
| 1322 // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|) |
| 1323 // if |offset == 0| or we have already computed the CRC for [0 .. offset). |
| 1324 // We rely on most write operations being sequential, start to end, to compute |
| 1325 // the crc of the data. When we write to an entry and close without having |
| 1326 // done a sequential write, we don't check the CRC on read. |
| 1327 if (offset == 0 || crc32s_end_offset_[stream_index] == offset) { |
| 1328 uint32 initial_crc = |
| 1329 (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0); |
| 1330 if (length > 0) { |
| 1331 crc32s_[stream_index] = crc32( |
| 1332 initial_crc, reinterpret_cast<const Bytef*>(buffer->data()), length); |
| 1333 } |
| 1334 crc32s_end_offset_[stream_index] = offset + length; |
| 1335 } |
| 1336 } |
| 1337 |
| 1250 } // namespace disk_cache | 1338 } // namespace disk_cache |
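The new AdvanceCrc only stays valid across sequential writes because zlib's crc32() composes over consecutive chunks: feeding the data in pieces yields the same value as one pass over the whole buffer. A minimal standalone demonstration of that property (plain zlib, no Chromium types):

```cpp
// Demonstrates the property AdvanceCrc relies on: a CRC computed over
// consecutive chunks equals the CRC of the whole buffer in one call.
#include <cassert>
#include <cstring>
#include <zlib.h>

int main() {
  const char data[] = "stream contents written in two sequential chunks";
  const uInt len = static_cast<uInt>(std::strlen(data));
  const uInt split = len / 2;

  uLong whole = crc32(0, Z_NULL, 0);
  whole = crc32(whole, reinterpret_cast<const Bytef*>(data), len);

  uLong running = crc32(0, Z_NULL, 0);
  running = crc32(running, reinterpret_cast<const Bytef*>(data), split);
  running = crc32(running, reinterpret_cast<const Bytef*>(data) + split,
                  len - split);

  assert(whole == running);  // Incremental and one-shot CRCs agree.
  return 0;
}
```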