| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/entry_impl.h" | 5 #include "net/disk_cache/entry_impl.h" |
| 6 | 6 |
| 7 #include "base/message_loop.h" | 7 #include "base/message_loop.h" |
| 8 #include "base/metrics/histogram.h" | 8 #include "base/metrics/histogram.h" |
| 9 #include "base/string_util.h" | 9 #include "base/string_util.h" |
| 10 #include "net/base/io_buffer.h" | 10 #include "net/base/io_buffer.h" |
| (...skipping 88 matching lines...) |
| 99 backend_->BufferDeleted(capacity() - kMaxBlockSize); | 99 backend_->BufferDeleted(capacity() - kMaxBlockSize); |
| 100 } | 100 } |
| 101 | 101 |
| 102 // Returns true if we can handle writing |len| bytes to |offset|. | 102 // Returns true if we can handle writing |len| bytes to |offset|. |
| 103 bool PreWrite(int offset, int len); | 103 bool PreWrite(int offset, int len); |
| 104 | 104 |
| 105 // Truncates the buffer to |offset| bytes. | 105 // Truncates the buffer to |offset| bytes. |
| 106 void Truncate(int offset); | 106 void Truncate(int offset); |
| 107 | 107 |
| 108 // Writes |len| bytes from |buf| at the given |offset|. | 108 // Writes |len| bytes from |buf| at the given |offset|. |
| 109 void Write(int offset, net::IOBuffer* buf, int len); | 109 void Write(int offset, IOBuffer* buf, int len); |
| 110 | 110 |
| 111 // Returns true if we can read |len| bytes from |offset|, given that the | 111 // Returns true if we can read |len| bytes from |offset|, given that the |
| 112 // actual file has |eof| bytes stored. Note that the number of bytes to read | 112 // actual file has |eof| bytes stored. Note that the number of bytes to read |
| 113 // may be modified by this method even though it returns false: that means we | 113 // may be modified by this method even though it returns false: that means we |
| 114 // should do a smaller read from disk. | 114 // should do a smaller read from disk. |
| 115 bool PreRead(int eof, int offset, int* len); | 115 bool PreRead(int eof, int offset, int* len); |
| 116 | 116 |
| 117 // Read |len| bytes from |buf| at the given |offset|. | 117 // Read |len| bytes from |buf| at the given |offset|. |
| 118 int Read(int offset, net::IOBuffer* buf, int len); | 118 int Read(int offset, IOBuffer* buf, int len); |
| 119 | 119 |
| 120 // Prepare this buffer for reuse. | 120 // Prepare this buffer for reuse. |
| 121 void Reset(); | 121 void Reset(); |
| 122 | 122 |
| 123 char* Data() { return buffer_.size() ? &buffer_[0] : NULL; } | 123 char* Data() { return buffer_.size() ? &buffer_[0] : NULL; } |
| 124 int Size() { return static_cast<int>(buffer_.size()); } | 124 int Size() { return static_cast<int>(buffer_.size()); } |
| 125 int Start() { return offset_; } | 125 int Start() { return offset_; } |
| 126 int End() { return offset_ + Size(); } | 126 int End() { return offset_ + Size(); } |
| 127 | 127 |
| 128 private: | 128 private: |
| (...skipping 32 matching lines...) |
| 161 void EntryImpl::UserBuffer::Truncate(int offset) { | 161 void EntryImpl::UserBuffer::Truncate(int offset) { |
| 162 DCHECK_GE(offset, 0); | 162 DCHECK_GE(offset, 0); |
| 163 DCHECK_GE(offset, offset_); | 163 DCHECK_GE(offset, offset_); |
| 164 DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_; | 164 DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_; |
| 165 | 165 |
| 166 offset -= offset_; | 166 offset -= offset_; |
| 167 if (Size() >= offset) | 167 if (Size() >= offset) |
| 168 buffer_.resize(offset); | 168 buffer_.resize(offset); |
| 169 } | 169 } |
| 170 | 170 |
| 171 void EntryImpl::UserBuffer::Write(int offset, net::IOBuffer* buf, int len) { | 171 void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) { |
| 172 DCHECK_GE(offset, 0); | 172 DCHECK_GE(offset, 0); |
| 173 DCHECK_GE(len, 0); | 173 DCHECK_GE(len, 0); |
| 174 DCHECK_GE(offset + len, 0); | 174 DCHECK_GE(offset + len, 0); |
| 175 DCHECK_GE(offset, offset_); | 175 DCHECK_GE(offset, offset_); |
| 176 DVLOG(3) << "Buffer write at " << offset << " current " << offset_; | 176 DVLOG(3) << "Buffer write at " << offset << " current " << offset_; |
| 177 | 177 |
| 178 if (!Size() && offset > kMaxBlockSize) | 178 if (!Size() && offset > kMaxBlockSize) |
| 179 offset_ = offset; | 179 offset_ = offset; |
| 180 | 180 |
| 181 offset -= offset_; | 181 offset -= offset_; |
| (...skipping 36 matching lines...) |
| 218 return false; | 218 return false; |
| 219 } | 219 } |
| 220 | 220 |
| 221 if (!Size()) | 221 if (!Size()) |
| 222 return false; | 222 return false; |
| 223 | 223 |
| 224 // See if we can fulfill the first part of the operation. | 224 // See if we can fulfill the first part of the operation. |
| 225 return (offset - offset_ < Size()); | 225 return (offset - offset_ < Size()); |
| 226 } | 226 } |
| 227 | 227 |
| 228 int EntryImpl::UserBuffer::Read(int offset, net::IOBuffer* buf, int len) { | 228 int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) { |
| 229 DCHECK_GE(offset, 0); | 229 DCHECK_GE(offset, 0); |
| 230 DCHECK_GT(len, 0); | 230 DCHECK_GT(len, 0); |
| 231 DCHECK(Size() || offset < offset_); | 231 DCHECK(Size() || offset < offset_); |
| 232 | 232 |
| 233 int clean_bytes = 0; | 233 int clean_bytes = 0; |
| 234 if (offset < offset_) { | 234 if (offset < offset_) { |
| 235 // We don't have a file so let's fill the first part with 0. | 235 // We don't have a file so let's fill the first part with 0. |
| 236 clean_bytes = std::min(offset_ - offset, len); | 236 clean_bytes = std::min(offset_ - offset, len); |
| 237 memset(buf->data(), 0, clean_bytes); | 237 memset(buf->data(), 0, clean_bytes); |
| 238 if (len == clean_bytes) | 238 if (len == clean_bytes) |
| (...skipping 46 matching lines...) |
| 285 | 285 |
| 286 DVLOG(3) << "Buffer grow to " << required; | 286 DVLOG(3) << "Buffer grow to " << required; |
| 287 | 287 |
| 288 buffer_.reserve(required); | 288 buffer_.reserve(required); |
| 289 return true; | 289 return true; |
| 290 } | 290 } |
| 291 | 291 |
| 292 // ------------------------------------------------------------------------ | 292 // ------------------------------------------------------------------------ |
| 293 | 293 |
| 294 EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only) | 294 EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only) |
| 295 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)), backend_(backend), | 295 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)), |
| 296 doomed_(false), read_only_(read_only), dirty_(false) { | 296 backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only), |
| 297 dirty_(false) { |
| 297 entry_.LazyInit(backend->File(address), address); | 298 entry_.LazyInit(backend->File(address), address); |
| 298 for (int i = 0; i < kNumStreams; i++) { | 299 for (int i = 0; i < kNumStreams; i++) { |
| 299 unreported_size_[i] = 0; | 300 unreported_size_[i] = 0; |
| 300 } | 301 } |
| 301 } | 302 } |
| 302 | 303 |
| 303 void EntryImpl::DoomImpl() { | 304 void EntryImpl::DoomImpl() { |
| 304 if (doomed_) | 305 if (doomed_ || !backend_) |
| 305 return; | 306 return; |
| 306 | 307 |
| 307 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); | 308 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); |
| 308 backend_->InternalDoomEntry(this); | 309 backend_->InternalDoomEntry(this); |
| 309 } | 310 } |
| 310 | 311 |
| 311 int EntryImpl::ReadDataImpl( | 312 int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len, |
| 312 int index, int offset, net::IOBuffer* buf, int buf_len, | 313 const CompletionCallback& callback) { |
| 313 const net::CompletionCallback& callback) { | |
| 314 if (net_log_.IsLoggingAllEvents()) { | 314 if (net_log_.IsLoggingAllEvents()) { |
| 315 net_log_.BeginEvent( | 315 net_log_.BeginEvent( |
| 316 net::NetLog::TYPE_ENTRY_READ_DATA, | 316 net::NetLog::TYPE_ENTRY_READ_DATA, |
| 317 make_scoped_refptr( | 317 make_scoped_refptr( |
| 318 new ReadWriteDataParameters(index, offset, buf_len, false))); | 318 new ReadWriteDataParameters(index, offset, buf_len, false))); |
| 319 } | 319 } |
| 320 | 320 |
| 321 int result = InternalReadData(index, offset, buf, buf_len, callback); | 321 int result = InternalReadData(index, offset, buf, buf_len, callback); |
| 322 | 322 |
| 323 if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) { | 323 if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) { |
| 324 net_log_.EndEvent( | 324 net_log_.EndEvent( |
| 325 net::NetLog::TYPE_ENTRY_READ_DATA, | 325 net::NetLog::TYPE_ENTRY_READ_DATA, |
| 326 make_scoped_refptr(new ReadWriteCompleteParameters(result))); | 326 make_scoped_refptr(new ReadWriteCompleteParameters(result))); |
| 327 } | 327 } |
| 328 return result; | 328 return result; |
| 329 } | 329 } |
| 330 | 330 |
| 331 int EntryImpl::WriteDataImpl( | 331 int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len, |
| 332 int index, int offset, net::IOBuffer* buf, int buf_len, | 332 const CompletionCallback& callback, |
| 333 const net::CompletionCallback& callback, bool truncate) { | 333 bool truncate) { |
| 334 if (net_log_.IsLoggingAllEvents()) { | 334 if (net_log_.IsLoggingAllEvents()) { |
| 335 net_log_.BeginEvent( | 335 net_log_.BeginEvent( |
| 336 net::NetLog::TYPE_ENTRY_WRITE_DATA, | 336 net::NetLog::TYPE_ENTRY_WRITE_DATA, |
| 337 make_scoped_refptr( | 337 make_scoped_refptr( |
| 338 new ReadWriteDataParameters(index, offset, buf_len, truncate))); | 338 new ReadWriteDataParameters(index, offset, buf_len, truncate))); |
| 339 } | 339 } |
| 340 | 340 |
| 341 int result = InternalWriteData(index, offset, buf, buf_len, callback, | 341 int result = InternalWriteData(index, offset, buf, buf_len, callback, |
| 342 truncate); | 342 truncate); |
| 343 | 343 |
| 344 if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) { | 344 if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) { |
| 345 net_log_.EndEvent( | 345 net_log_.EndEvent( |
| 346 net::NetLog::TYPE_ENTRY_WRITE_DATA, | 346 net::NetLog::TYPE_ENTRY_WRITE_DATA, |
| 347 make_scoped_refptr(new ReadWriteCompleteParameters(result))); | 347 make_scoped_refptr(new ReadWriteCompleteParameters(result))); |
| 348 } | 348 } |
| 349 return result; | 349 return result; |
| 350 } | 350 } |
| 351 | 351 |
| 352 int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len, | 352 int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len, |
| 353 const net::CompletionCallback& callback) { | 353 const CompletionCallback& callback) { |
| 354 DCHECK(node_.Data()->dirty || read_only_); | 354 DCHECK(node_.Data()->dirty || read_only_); |
| 355 int result = InitSparseData(); | 355 int result = InitSparseData(); |
| 356 if (net::OK != result) | 356 if (net::OK != result) |
| 357 return result; | 357 return result; |
| 358 | 358 |
| 359 TimeTicks start = TimeTicks::Now(); | 359 TimeTicks start = TimeTicks::Now(); |
| 360 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, | 360 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, |
| 361 callback); | 361 callback); |
| 362 ReportIOTime(kSparseRead, start); | 362 ReportIOTime(kSparseRead, start); |
| 363 return result; | 363 return result; |
| 364 } | 364 } |
| 365 | 365 |
| 366 int EntryImpl::WriteSparseDataImpl( | 366 int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len, |
| 367 int64 offset, net::IOBuffer* buf, int buf_len, | 367 const CompletionCallback& callback) { |
| 368 const net::CompletionCallback& callback) { | |
| 369 DCHECK(node_.Data()->dirty || read_only_); | 368 DCHECK(node_.Data()->dirty || read_only_); |
| 370 int result = InitSparseData(); | 369 int result = InitSparseData(); |
| 371 if (net::OK != result) | 370 if (net::OK != result) |
| 372 return result; | 371 return result; |
| 373 | 372 |
| 374 TimeTicks start = TimeTicks::Now(); | 373 TimeTicks start = TimeTicks::Now(); |
| 375 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, | 374 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, |
| 376 buf_len, callback); | 375 buf_len, callback); |
| 377 ReportIOTime(kSparseWrite, start); | 376 ReportIOTime(kSparseWrite, start); |
| 378 return result; | 377 return result; |
| 379 } | 378 } |
| 380 | 379 |
| 381 int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) { | 380 int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) { |
| 382 int result = InitSparseData(); | 381 int result = InitSparseData(); |
| 383 if (net::OK != result) | 382 if (net::OK != result) |
| 384 return result; | 383 return result; |
| 385 | 384 |
| 386 return sparse_->GetAvailableRange(offset, len, start); | 385 return sparse_->GetAvailableRange(offset, len, start); |
| 387 } | 386 } |
| 388 | 387 |
| 389 void EntryImpl::CancelSparseIOImpl() { | 388 void EntryImpl::CancelSparseIOImpl() { |
| 390 if (!sparse_.get()) | 389 if (!sparse_.get()) |
| 391 return; | 390 return; |
| 392 | 391 |
| 393 sparse_->CancelIO(); | 392 sparse_->CancelIO(); |
| 394 } | 393 } |
| 395 | 394 |
| 396 int EntryImpl::ReadyForSparseIOImpl(const net::CompletionCallback& callback) { | 395 int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) { |
| 397 DCHECK(sparse_.get()); | 396 DCHECK(sparse_.get()); |
| 398 return sparse_->ReadyToUse(callback); | 397 return sparse_->ReadyToUse(callback); |
| 399 } | 398 } |
| 400 | 399 |
| 401 uint32 EntryImpl::GetHash() { | 400 uint32 EntryImpl::GetHash() { |
| 402 return entry_.Data()->hash; | 401 return entry_.Data()->hash; |
| 403 } | 402 } |
| 404 | 403 |
| 405 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, | 404 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, |
| 406 uint32 hash) { | 405 uint32 hash) { |
| (...skipping 264 matching lines...) |
| 671 stored->data_size[i] = 0; | 670 stored->data_size[i] = 0; |
| 672 } | 671 } |
| 673 entry_.Store(); | 672 entry_.Store(); |
| 674 } | 673 } |
| 675 | 674 |
| 676 void EntryImpl::IncrementIoCount() { | 675 void EntryImpl::IncrementIoCount() { |
| 677 backend_->IncrementIoCount(); | 676 backend_->IncrementIoCount(); |
| 678 } | 677 } |
| 679 | 678 |
| 680 void EntryImpl::DecrementIoCount() { | 679 void EntryImpl::DecrementIoCount() { |
| 681 backend_->DecrementIoCount(); | 680 if (backend_) |
| 681 backend_->DecrementIoCount(); |
| 682 } |
| 683 |
| 684 void EntryImpl::OnEntryCreated(BackendImpl* backend) { |
| 685 // Just grab a reference to the background queue. |
| 686 background_queue_ = backend->GetBackgroundQueue(); |
| 682 } | 687 } |
| 683 | 688 |
| 684 void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) { | 689 void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) { |
| 685 node_.Data()->last_used = last_used.ToInternalValue(); | 690 node_.Data()->last_used = last_used.ToInternalValue(); |
| 686 node_.Data()->last_modified = last_modified.ToInternalValue(); | 691 node_.Data()->last_modified = last_modified.ToInternalValue(); |
| 687 node_.set_modified(); | 692 node_.set_modified(); |
| 688 } | 693 } |
| 689 | 694 |
| 690 void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) { | 695 void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) { |
| 696 if (!backend_) |
| 697 return; |
| 698 |
| 691 int group = backend_->GetSizeGroup(); | 699 int group = backend_->GetSizeGroup(); |
| 692 switch (op) { | 700 switch (op) { |
| 693 case kRead: | 701 case kRead: |
| 694 CACHE_UMA(AGE_MS, "ReadTime", group, start); | 702 CACHE_UMA(AGE_MS, "ReadTime", group, start); |
| 695 break; | 703 break; |
| 696 case kWrite: | 704 case kWrite: |
| 697 CACHE_UMA(AGE_MS, "WriteTime", group, start); | 705 CACHE_UMA(AGE_MS, "WriteTime", group, start); |
| 698 break; | 706 break; |
| 699 case kSparseRead: | 707 case kSparseRead: |
| 700 CACHE_UMA(AGE_MS, "SparseReadTime", 0, start); | 708 CACHE_UMA(AGE_MS, "SparseReadTime", 0, start); |
| (...skipping 36 matching lines...) |
| 737 | 745 |
| 738 if (key_size < key1_len || key_size > kMaxInternalKeyLength) | 746 if (key_size < key1_len || key_size > kMaxInternalKeyLength) |
| 739 return 1; | 747 return 1; |
| 740 | 748 |
| 741 return ((key_size - key1_len) / 256 + 2); | 749 return ((key_size - key1_len) / 256 + 2); |
| 742 } | 750 } |
| 743 | 751 |
| 744 // ------------------------------------------------------------------------ | 752 // ------------------------------------------------------------------------ |
| 745 | 753 |
| 746 void EntryImpl::Doom() { | 754 void EntryImpl::Doom() { |
| 747 backend_->background_queue()->DoomEntryImpl(this); | 755 if (background_queue_) |
| 756 background_queue_->DoomEntryImpl(this); |
| 748 } | 757 } |
| 749 | 758 |
| 750 void EntryImpl::Close() { | 759 void EntryImpl::Close() { |
| 751 backend_->background_queue()->CloseEntryImpl(this); | 760 if (background_queue_) |
| 761 background_queue_->CloseEntryImpl(this); |
| 752 } | 762 } |
| 753 | 763 |
| 754 std::string EntryImpl::GetKey() const { | 764 std::string EntryImpl::GetKey() const { |
| 755 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 765 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 756 int key_len = entry->Data()->key_len; | 766 int key_len = entry->Data()->key_len; |
| 757 if (key_len <= kMaxInternalKeyLength) | 767 if (key_len <= kMaxInternalKeyLength) |
| 758 return std::string(entry->Data()->key); | 768 return std::string(entry->Data()->key); |
| 759 | 769 |
| 760 // We keep a copy of the key so that we can always return it, even if the | 770 // We keep a copy of the key so that we can always return it, even if the |
| 761 // backend is disabled. | 771 // backend is disabled. |
| (...skipping 32 matching lines...) |
| 794 } | 804 } |
| 795 | 805 |
| 796 int32 EntryImpl::GetDataSize(int index) const { | 806 int32 EntryImpl::GetDataSize(int index) const { |
| 797 if (index < 0 || index >= kNumStreams) | 807 if (index < 0 || index >= kNumStreams) |
| 798 return 0; | 808 return 0; |
| 799 | 809 |
| 800 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 810 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 801 return entry->Data()->data_size[index]; | 811 return entry->Data()->data_size[index]; |
| 802 } | 812 } |
| 803 | 813 |
| 804 int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len, | 814 int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len, |
| 805 const net::CompletionCallback& callback) { | 815 const CompletionCallback& callback) { |
| 806 if (callback.is_null()) | 816 if (callback.is_null()) |
| 807 return ReadDataImpl(index, offset, buf, buf_len, callback); | 817 return ReadDataImpl(index, offset, buf, buf_len, callback); |
| 808 | 818 |
| 809 DCHECK(node_.Data()->dirty || read_only_); | 819 DCHECK(node_.Data()->dirty || read_only_); |
| 810 if (index < 0 || index >= kNumStreams) | 820 if (index < 0 || index >= kNumStreams) |
| 811 return net::ERR_INVALID_ARGUMENT; | 821 return net::ERR_INVALID_ARGUMENT; |
| 812 | 822 |
| 813 int entry_size = entry_.Data()->data_size[index]; | 823 int entry_size = entry_.Data()->data_size[index]; |
| 814 if (offset >= entry_size || offset < 0 || !buf_len) | 824 if (offset >= entry_size || offset < 0 || !buf_len) |
| 815 return 0; | 825 return 0; |
| 816 | 826 |
| 817 if (buf_len < 0) | 827 if (buf_len < 0) |
| 818 return net::ERR_INVALID_ARGUMENT; | 828 return net::ERR_INVALID_ARGUMENT; |
| 819 | 829 |
| 820 backend_->background_queue()->ReadData(this, index, offset, buf, buf_len, | 830 if (!background_queue_) |
| 821 callback); | 831 return net::ERR_UNEXPECTED; |
| 832 |
| 833 background_queue_->ReadData(this, index, offset, buf, buf_len, callback); |
| 822 return net::ERR_IO_PENDING; | 834 return net::ERR_IO_PENDING; |
| 823 } | 835 } |
| 824 | 836 |
| 825 int EntryImpl::WriteData( | 837 int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len, |
| 826 int index, int offset, net::IOBuffer* buf, int buf_len, | 838 const CompletionCallback& callback, bool truncate) { |
| 827 const net::CompletionCallback& callback, bool truncate) { | |
| 828 if (callback.is_null()) | 839 if (callback.is_null()) |
| 829 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate); | 840 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate); |
| 830 | 841 |
| 831 DCHECK(node_.Data()->dirty || read_only_); | 842 DCHECK(node_.Data()->dirty || read_only_); |
| 832 if (index < 0 || index >= kNumStreams) | 843 if (index < 0 || index >= kNumStreams) |
| 833 return net::ERR_INVALID_ARGUMENT; | 844 return net::ERR_INVALID_ARGUMENT; |
| 834 | 845 |
| 835 if (offset < 0 || buf_len < 0) | 846 if (offset < 0 || buf_len < 0) |
| 836 return net::ERR_INVALID_ARGUMENT; | 847 return net::ERR_INVALID_ARGUMENT; |
| 837 | 848 |
| 838 backend_->background_queue()->WriteData(this, index, offset, buf, buf_len, | 849 if (!background_queue_) |
| 839 truncate, callback); | 850 return net::ERR_UNEXPECTED; |
| 851 |
| 852 background_queue_->WriteData(this, index, offset, buf, buf_len, truncate, |
| 853 callback); |
| 840 return net::ERR_IO_PENDING; | 854 return net::ERR_IO_PENDING; |
| 841 } | 855 } |
| 842 | 856 |
| 843 int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | 857 int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len, |
| 844 const net::CompletionCallback& callback) { | 858 const CompletionCallback& callback) { |
| 845 if (callback.is_null()) | 859 if (callback.is_null()) |
| 846 return ReadSparseDataImpl(offset, buf, buf_len, callback); | 860 return ReadSparseDataImpl(offset, buf, buf_len, callback); |
| 847 | 861 |
| 848 backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len, | 862 if (!background_queue_) |
| 849 callback); | 863 return net::ERR_UNEXPECTED; |
| 864 |
| 865 background_queue_->ReadSparseData(this, offset, buf, buf_len, callback); |
| 850 return net::ERR_IO_PENDING; | 866 return net::ERR_IO_PENDING; |
| 851 } | 867 } |
| 852 | 868 |
| 853 int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | 869 int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len, |
| 854 const net::CompletionCallback& callback) { | 870 const CompletionCallback& callback) { |
| 855 if (callback.is_null()) | 871 if (callback.is_null()) |
| 856 return WriteSparseDataImpl(offset, buf, buf_len, callback); | 872 return WriteSparseDataImpl(offset, buf, buf_len, callback); |
| 857 | 873 |
| 858 backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len, | 874 if (!background_queue_) |
| 859 callback); | 875 return net::ERR_UNEXPECTED; |
| 876 |
| 877 background_queue_->WriteSparseData(this, offset, buf, buf_len, callback); |
| 860 return net::ERR_IO_PENDING; | 878 return net::ERR_IO_PENDING; |
| 861 } | 879 } |
| 862 | 880 |
| 863 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, | 881 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, |
| 864 const net::CompletionCallback& callback) { | 882 const CompletionCallback& callback) { |
| 865 backend_->background_queue()->GetAvailableRange(this, offset, len, start, | 883 if (!background_queue_) |
| 866 callback); | 884 return net::ERR_UNEXPECTED; |
| 885 |
| 886 background_queue_->GetAvailableRange(this, offset, len, start, callback); |
| 867 return net::ERR_IO_PENDING; | 887 return net::ERR_IO_PENDING; |
| 868 } | 888 } |
| 869 | 889 |
| 870 bool EntryImpl::CouldBeSparse() const { | 890 bool EntryImpl::CouldBeSparse() const { |
| 871 if (sparse_.get()) | 891 if (sparse_.get()) |
| 872 return true; | 892 return true; |
| 873 | 893 |
| 874 scoped_ptr<SparseControl> sparse; | 894 scoped_ptr<SparseControl> sparse; |
| 875 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this))); | 895 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this))); |
| 876 return sparse->CouldBeSparse(); | 896 return sparse->CouldBeSparse(); |
| 877 } | 897 } |
| 878 | 898 |
| 879 void EntryImpl::CancelSparseIO() { | 899 void EntryImpl::CancelSparseIO() { |
| 880 backend_->background_queue()->CancelSparseIO(this); | 900 if (background_queue_) |
| 901 background_queue_->CancelSparseIO(this); |
| 881 } | 902 } |
| 882 | 903 |
| 883 int EntryImpl::ReadyForSparseIO(const net::CompletionCallback& callback) { | 904 int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) { |
| 884 if (!sparse_.get()) | 905 if (!sparse_.get()) |
| 885 return net::OK; | 906 return net::OK; |
| 886 | 907 |
| 887 backend_->background_queue()->ReadyForSparseIO(this, callback); | 908 if (!background_queue_) |
| 909 return net::ERR_UNEXPECTED; |
| 910 |
| 911 background_queue_->ReadyForSparseIO(this, callback); |
| 888 return net::ERR_IO_PENDING; | 912 return net::ERR_IO_PENDING; |
| 889 } | 913 } |
| 890 | 914 |
| 891 // When an entry is deleted from the cache, we clean up all the data associated | 915 // When an entry is deleted from the cache, we clean up all the data associated |
| 892 // with it for two reasons: to simplify the reuse of the block (we know that any | 916 // with it for two reasons: to simplify the reuse of the block (we know that any |
| 893 // unused block is filled with zeros), and to simplify the handling of write / | 917 // unused block is filled with zeros), and to simplify the handling of write / |
| 894 // read partial information from an entry (don't have to worry about returning | 918 // read partial information from an entry (don't have to worry about returning |
| 895 // data related to a previous cache entry because the range was not fully | 919 // data related to a previous cache entry because the range was not fully |
| 896 // written before). | 920 // written before). |
| 897 EntryImpl::~EntryImpl() { | 921 EntryImpl::~EntryImpl() { |
| 898 Log("~EntryImpl in"); | 922 Log("~EntryImpl in"); |
| 923 if (!backend_) { |
| 924 entry_.clear_modified(); |
| 925 node_.clear_modified(); |
| 926 return; |
| 927 } |
| 899 | 928 |
| 900 // Save the sparse info to disk. This will generate IO for this entry and | 929 // Save the sparse info to disk. This will generate IO for this entry and |
| 901 // maybe for a child entry, so it is important to do it before deleting this | 930 // maybe for a child entry, so it is important to do it before deleting this |
| 902 // entry. | 931 // entry. |
| 903 sparse_.reset(); | 932 sparse_.reset(); |
| 904 | 933 |
| 905 // Remove this entry from the list of open entries. | 934 // Remove this entry from the list of open entries. |
| 906 backend_->OnEntryDestroyBegin(entry_.address()); | 935 backend_->OnEntryDestroyBegin(entry_.address()); |
| 907 | 936 |
| 908 if (doomed_) { | 937 if (doomed_) { |
| (...skipping 27 matching lines...) |
| 936 } | 965 } |
| 937 } | 966 } |
| 938 | 967 |
| 939 Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this)); | 968 Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this)); |
| 940 net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL, NULL); | 969 net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL, NULL); |
| 941 backend_->OnEntryDestroyEnd(); | 970 backend_->OnEntryDestroyEnd(); |
| 942 } | 971 } |
| 943 | 972 |
| 944 // ------------------------------------------------------------------------ | 973 // ------------------------------------------------------------------------ |
| 945 | 974 |
| 946 int EntryImpl::InternalReadData( | 975 int EntryImpl::InternalReadData(int index, int offset, |
| 947 int index, int offset, net::IOBuffer* buf, int buf_len, | 976 IOBuffer* buf, int buf_len, |
| 948 const net::CompletionCallback& callback) { | 977 const CompletionCallback& callback) { |
| 949 DCHECK(node_.Data()->dirty || read_only_); | 978 DCHECK(node_.Data()->dirty || read_only_); |
| 950 DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len; | 979 DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len; |
| 951 if (index < 0 || index >= kNumStreams) | 980 if (index < 0 || index >= kNumStreams) |
| 952 return net::ERR_INVALID_ARGUMENT; | 981 return net::ERR_INVALID_ARGUMENT; |
| 953 | 982 |
| 954 int entry_size = entry_.Data()->data_size[index]; | 983 int entry_size = entry_.Data()->data_size[index]; |
| 955 if (offset >= entry_size || offset < 0 || !buf_len) | 984 if (offset >= entry_size || offset < 0 || !buf_len) |
| 956 return 0; | 985 return 0; |
| 957 | 986 |
| 958 if (buf_len < 0) | 987 if (buf_len < 0) |
| 959 return net::ERR_INVALID_ARGUMENT; | 988 return net::ERR_INVALID_ARGUMENT; |
| 960 | 989 |
| 990 if (!backend_) |
| 991 return net::ERR_UNEXPECTED; |
| 992 |
| 961 TimeTicks start = TimeTicks::Now(); | 993 TimeTicks start = TimeTicks::Now(); |
| 962 | 994 |
| 963 if (offset + buf_len > entry_size) | 995 if (offset + buf_len > entry_size) |
| 964 buf_len = entry_size - offset; | 996 buf_len = entry_size - offset; |
| 965 | 997 |
| 966 UpdateRank(false); | 998 UpdateRank(false); |
| 967 | 999 |
| 968 backend_->OnEvent(Stats::READ_DATA); | 1000 backend_->OnEvent(Stats::READ_DATA); |
| 969 backend_->OnRead(buf_len); | 1001 backend_->OnRead(buf_len); |
| 970 | 1002 |
| (...skipping 46 matching lines...) |
| 1017 if (io_callback && completed) | 1049 if (io_callback && completed) |
| 1018 io_callback->Discard(); | 1050 io_callback->Discard(); |
| 1019 | 1051 |
| 1020 if (io_callback) | 1052 if (io_callback) |
| 1021 ReportIOTime(kReadAsync1, start_async); | 1053 ReportIOTime(kReadAsync1, start_async); |
| 1022 | 1054 |
| 1023 ReportIOTime(kRead, start); | 1055 ReportIOTime(kRead, start); |
| 1024 return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING; | 1056 return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING; |
| 1025 } | 1057 } |
| 1026 | 1058 |
| 1027 int EntryImpl::InternalWriteData( | 1059 int EntryImpl::InternalWriteData(int index, int offset, |
| 1028 int index, int offset, net::IOBuffer* buf, int buf_len, | 1060 IOBuffer* buf, int buf_len, |
| 1029 const net::CompletionCallback& callback, bool truncate) { | 1061 const CompletionCallback& callback, |
| 1062 bool truncate) { |
| 1030 DCHECK(node_.Data()->dirty || read_only_); | 1063 DCHECK(node_.Data()->dirty || read_only_); |
| 1031 DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len; | 1064 DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len; |
| 1032 if (index < 0 || index >= kNumStreams) | 1065 if (index < 0 || index >= kNumStreams) |
| 1033 return net::ERR_INVALID_ARGUMENT; | 1066 return net::ERR_INVALID_ARGUMENT; |
| 1034 | 1067 |
| 1035 if (offset < 0 || buf_len < 0) | 1068 if (offset < 0 || buf_len < 0) |
| 1036 return net::ERR_INVALID_ARGUMENT; | 1069 return net::ERR_INVALID_ARGUMENT; |
| 1037 | 1070 |
| 1071 if (!backend_) |
| 1072 return net::ERR_UNEXPECTED; |
| 1073 |
| 1038 int max_file_size = backend_->MaxFileSize(); | 1074 int max_file_size = backend_->MaxFileSize(); |
| 1039 | 1075 |
| 1040 // offset or buf_len could be negative numbers. | 1076 // offset or buf_len could be negative numbers. |
| 1041 if (offset > max_file_size || buf_len > max_file_size || | 1077 if (offset > max_file_size || buf_len > max_file_size || |
| 1042 offset + buf_len > max_file_size) { | 1078 offset + buf_len > max_file_size) { |
| 1043 int size = offset + buf_len; | 1079 int size = offset + buf_len; |
| 1044 if (size <= max_file_size) | 1080 if (size <= max_file_size) |
| 1045 size = kint32max; | 1081 size = kint32max; |
| 1046 backend_->TooMuchStorageRequested(size); | 1082 backend_->TooMuchStorageRequested(size); |
| 1047 return net::ERR_FAILED; | 1083 return net::ERR_FAILED; |
| (...skipping 85 matching lines...) |
| 1133 if (!CreateBlock(size, &address)) | 1169 if (!CreateBlock(size, &address)) |
| 1134 return false; | 1170 return false; |
| 1135 | 1171 |
| 1136 entry_.Data()->data_addr[index] = address.value(); | 1172 entry_.Data()->data_addr[index] = address.value(); |
| 1137 entry_.Store(); | 1173 entry_.Store(); |
| 1138 return true; | 1174 return true; |
| 1139 } | 1175 } |
| 1140 | 1176 |
| 1141 bool EntryImpl::CreateBlock(int size, Addr* address) { | 1177 bool EntryImpl::CreateBlock(int size, Addr* address) { |
| 1142 DCHECK(!address->is_initialized()); | 1178 DCHECK(!address->is_initialized()); |
| 1179 if (!backend_) |
| 1180 return false; |
| 1143 | 1181 |
| 1144 FileType file_type = Addr::RequiredFileType(size); | 1182 FileType file_type = Addr::RequiredFileType(size); |
| 1145 if (EXTERNAL == file_type) { | 1183 if (EXTERNAL == file_type) { |
| 1146 if (size > backend_->MaxFileSize()) | 1184 if (size > backend_->MaxFileSize()) |
| 1147 return false; | 1185 return false; |
| 1148 if (!backend_->CreateExternalFile(address)) | 1186 if (!backend_->CreateExternalFile(address)) |
| 1149 return false; | 1187 return false; |
| 1150 } else { | 1188 } else { |
| 1151 int num_blocks = (size + Addr::BlockSizeForFileType(file_type) - 1) / | 1189 int num_blocks = (size + Addr::BlockSizeForFileType(file_type) - 1) / |
| 1152 Addr::BlockSizeForFileType(file_type); | 1190 Addr::BlockSizeForFileType(file_type); |
| 1153 | 1191 |
| 1154 if (!backend_->CreateBlock(file_type, num_blocks, address)) | 1192 if (!backend_->CreateBlock(file_type, num_blocks, address)) |
| 1155 return false; | 1193 return false; |
| 1156 } | 1194 } |
| 1157 return true; | 1195 return true; |
| 1158 } | 1196 } |
| 1159 | 1197 |
| 1160 // Note that this method may end up modifying a block file so upon return the | 1198 // Note that this method may end up modifying a block file so upon return the |
| 1161 // involved block will be free, and could be reused for something else. If there | 1199 // involved block will be free, and could be reused for something else. If there |
| 1162 // is a crash after that point (and maybe before returning to the caller), the | 1200 // is a crash after that point (and maybe before returning to the caller), the |
| 1163 // entry will be left dirty... and at some point it will be discarded; it is | 1201 // entry will be left dirty... and at some point it will be discarded; it is |
| 1164 // important that the entry doesn't keep a reference to this address, or we'll | 1202 // important that the entry doesn't keep a reference to this address, or we'll |
| 1165 // end up deleting the contents of |address| once again. | 1203 // end up deleting the contents of |address| once again. |
| 1166 void EntryImpl::DeleteData(Addr address, int index) { | 1204 void EntryImpl::DeleteData(Addr address, int index) { |
| 1205 DCHECK(backend_); |
| 1167 if (!address.is_initialized()) | 1206 if (!address.is_initialized()) |
| 1168 return; | 1207 return; |
| 1169 if (address.is_separate_file()) { | 1208 if (address.is_separate_file()) { |
| 1170 int failure = !DeleteCacheFile(backend_->GetFileName(address)); | 1209 int failure = !DeleteCacheFile(backend_->GetFileName(address)); |
| 1171 CACHE_UMA(COUNTS, "DeleteFailed", 0, failure); | 1210 CACHE_UMA(COUNTS, "DeleteFailed", 0, failure); |
| 1172 if (failure) { | 1211 if (failure) { |
| 1173 LOG(ERROR) << "Failed to delete " << | 1212 LOG(ERROR) << "Failed to delete " << |
| 1174 backend_->GetFileName(address).value() << " from the cache."; | 1213 backend_->GetFileName(address).value() << " from the cache."; |
| 1175 } | 1214 } |
| 1176 if (files_[index]) | 1215 if (files_[index]) |
| 1177 files_[index] = NULL; // Releases the object. | 1216 files_[index] = NULL; // Releases the object. |
| 1178 } else { | 1217 } else { |
| 1179 backend_->DeleteBlock(address, true); | 1218 backend_->DeleteBlock(address, true); |
| 1180 } | 1219 } |
| 1181 } | 1220 } |
| 1182 | 1221 |
| 1183 void EntryImpl::UpdateRank(bool modified) { | 1222 void EntryImpl::UpdateRank(bool modified) { |
| 1223 if (!backend_) |
| 1224 return; |
| 1225 |
| 1184 if (!doomed_) { | 1226 if (!doomed_) { |
| 1185 // Everything is handled by the backend. | 1227 // Everything is handled by the backend. |
| 1186 backend_->UpdateRank(this, modified); | 1228 backend_->UpdateRank(this, modified); |
| 1187 return; | 1229 return; |
| 1188 } | 1230 } |
| 1189 | 1231 |
| 1190 Time current = Time::Now(); | 1232 Time current = Time::Now(); |
| 1191 node_.Data()->last_used = current.ToInternalValue(); | 1233 node_.Data()->last_used = current.ToInternalValue(); |
| 1192 | 1234 |
| 1193 if (modified) | 1235 if (modified) |
| 1194 node_.Data()->last_modified = current.ToInternalValue(); | 1236 node_.Data()->last_modified = current.ToInternalValue(); |
| 1195 } | 1237 } |
| 1196 | 1238 |
| 1197 File* EntryImpl::GetBackingFile(Addr address, int index) { | 1239 File* EntryImpl::GetBackingFile(Addr address, int index) { |
| 1240 if (!backend_) |
| 1241 return NULL; |
| 1242 |
| 1198 File* file; | 1243 File* file; |
| 1199 if (address.is_separate_file()) | 1244 if (address.is_separate_file()) |
| 1200 file = GetExternalFile(address, index); | 1245 file = GetExternalFile(address, index); |
| 1201 else | 1246 else |
| 1202 file = backend_->File(address); | 1247 file = backend_->File(address); |
| 1203 return file; | 1248 return file; |
| 1204 } | 1249 } |
| 1205 | 1250 |
| 1206 File* EntryImpl::GetExternalFile(Addr address, int index) { | 1251 File* EntryImpl::GetExternalFile(Addr address, int index) { |
| 1207 DCHECK(index >= 0 && index <= kKeyFileIndex); | 1252 DCHECK(index >= 0 && index <= kKeyFileIndex); |
| (...skipping 251 matching lines...) |
| 1459 void EntryImpl::SetEntryFlags(uint32 flags) { | 1504 void EntryImpl::SetEntryFlags(uint32 flags) { |
| 1460 entry_.Data()->flags |= flags; | 1505 entry_.Data()->flags |= flags; |
| 1461 entry_.set_modified(); | 1506 entry_.set_modified(); |
| 1462 } | 1507 } |
| 1463 | 1508 |
| 1464 uint32 EntryImpl::GetEntryFlags() { | 1509 uint32 EntryImpl::GetEntryFlags() { |
| 1465 return entry_.Data()->flags; | 1510 return entry_.Data()->flags; |
| 1466 } | 1511 } |
| 1467 | 1512 |
| 1468 void EntryImpl::GetData(int index, char** buffer, Addr* address) { | 1513 void EntryImpl::GetData(int index, char** buffer, Addr* address) { |
| 1514 DCHECK(backend_); |
| 1469 if (user_buffers_[index].get() && user_buffers_[index]->Size() && | 1515 if (user_buffers_[index].get() && user_buffers_[index]->Size() && |
| 1470 !user_buffers_[index]->Start()) { | 1516 !user_buffers_[index]->Start()) { |
| 1471 // The data is already in memory, just copy it and we're done. | 1517 // The data is already in memory, just copy it and we're done. |
| 1472 int data_len = entry_.Data()->data_size[index]; | 1518 int data_len = entry_.Data()->data_size[index]; |
| 1473 if (data_len <= user_buffers_[index]->Size()) { | 1519 if (data_len <= user_buffers_[index]->Size()) { |
| 1474 DCHECK(!user_buffers_[index]->Start()); | 1520 DCHECK(!user_buffers_[index]->Start()); |
| 1475 *buffer = new char[data_len]; | 1521 *buffer = new char[data_len]; |
| 1476 memcpy(*buffer, user_buffers_[index]->Data(), data_len); | 1522 memcpy(*buffer, user_buffers_[index]->Data(), data_len); |
| 1477 return; | 1523 return; |
| 1478 } | 1524 } |
| (...skipping 21 matching lines...) |
| 1500 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), | 1546 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), |
| 1501 entry_.address().value(), node_.address().value()); | 1547 entry_.address().value(), node_.address().value()); |
| 1502 | 1548 |
| 1503 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], | 1549 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], |
| 1504 entry_.Data()->data_addr[1], entry_.Data()->long_key); | 1550 entry_.Data()->data_addr[1], entry_.Data()->long_key); |
| 1505 | 1551 |
| 1506 Trace(" doomed: %d 0x%x", doomed_, dirty); | 1552 Trace(" doomed: %d 0x%x", doomed_, dirty); |
| 1507 } | 1553 } |
| 1508 | 1554 |
| 1509 } // namespace disk_cache | 1555 } // namespace disk_cache |
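
The recurring shape in the NEW column above is the weak-backend guard: DoomImpl, DecrementIoCount, ReportIOTime, InternalReadData, InternalWriteData, CreateBlock, DeleteData, UpdateRank, GetBackingFile, GetData and the destructor all check backend_ before touching it, while the public entry points (Doom, Close, ReadData, WriteData and the sparse calls) go through the separately cached background_queue_. In other words, EntryImpl stops assuming that BackendImpl outlives it. The standalone sketch below illustrates only that guard pattern; it substitutes std::weak_ptr for Chromium's base::WeakPtr, and the Backend/Entry names and the kErrUnexpected constant are illustrative stand-ins, not the real disk_cache API.

// Sketch of the "check the weak backend before every use" pattern from this
// patch, using std::weak_ptr in place of base::WeakPtr (illustrative only).
#include <iostream>
#include <memory>
#include <string>
#include <utility>

constexpr int kOk = 0;
constexpr int kErrUnexpected = -9;  // stand-in for net::ERR_UNEXPECTED

class Backend {
 public:
  int ReadData(const std::string& key) {
    std::cout << "backend read: " << key << "\n";
    return kOk;
  }
};

class Entry {
 public:
  explicit Entry(std::weak_ptr<Backend> backend) : backend_(std::move(backend)) {}

  // Every operation first promotes the weak pointer; if the backend has
  // already been destroyed, fail gracefully instead of dereferencing a
  // dangling pointer.
  int ReadData(const std::string& key) {
    std::shared_ptr<Backend> backend = backend_.lock();
    if (!backend)
      return kErrUnexpected;
    return backend->ReadData(key);
  }

 private:
  std::weak_ptr<Backend> backend_;
};

int main() {
  auto backend = std::make_shared<Backend>();
  Entry entry(backend);
  std::cout << entry.ReadData("alive") << "\n";  // 0: backend still present
  backend.reset();                               // simulate backend teardown
  std::cout << entry.ReadData("gone") << "\n";   // -9: guarded failure
  return 0;
}

In the patch itself the same split applies: operations that can report an error return net::ERR_UNEXPECTED once the backend is gone, while pure bookkeeping calls (DecrementIoCount, CancelSparseIO, the destructor cleanup) simply become no-ops, with the destructor additionally clearing the blocks' modified flags so nothing is written back.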
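
The unchanged UserBuffer context framing the patch (PreWrite/Truncate/Write/PreRead/Read above) is also worth a gloss: it caches a window of one stream that starts at offset_ rather than at byte 0, so Read() zero-fills whatever falls before the window (the clean_bytes prefix at old lines 233-237, on the assumption that nothing was ever written there) and copies the rest from memory. The sketch below shows only that windowed-read idea; the real class also coordinates with the backend's buffer accounting and with partial reads from disk, and the WindowedBuffer name and its simplified Write are assumptions for illustration.

// Illustrative windowed in-memory buffer: holds bytes [start, start + data.size())
// of a stream; reads before the window are zero-filled (assumed never written).
#include <algorithm>
#include <cstring>
#include <iostream>
#include <vector>

class WindowedBuffer {
 public:
  explicit WindowedBuffer(int start) : start_(start) {}

  // Only writes at or after the window start are handled in this sketch.
  void Write(int offset, const char* src, int len) {
    int rel = offset - start_;
    if (rel + len > static_cast<int>(data_.size()))
      data_.resize(rel + len);
    std::memcpy(&data_[rel], src, len);
  }

  int Read(int offset, char* dst, int len) const {
    int copied = 0;
    if (offset < start_) {
      // Bytes before the window were never written: return zeros, as
      // EntryImpl::UserBuffer::Read does for its |clean_bytes| prefix.
      int clean = std::min(start_ - offset, len);
      std::memset(dst, 0, clean);
      copied += clean;
      offset += clean;
    }
    int rel = offset - start_;
    int avail = std::max(0, static_cast<int>(data_.size()) - rel);
    int n = std::min(len - copied, avail);
    if (n > 0)
      std::memcpy(dst + copied, &data_[rel], n);
    return copied + n;
  }

 private:
  int start_;               // stream offset of the first buffered byte
  std::vector<char> data_;  // buffered bytes
};

int main() {
  WindowedBuffer buf(100);       // window starts at stream offset 100
  buf.Write(100, "hello", 5);
  char out[10] = {};
  int n = buf.Read(97, out, 8);  // 3 zero bytes, then "hello"
  std::cout << n << " bytes, tail: " << (out + 3) << "\n";
  return 0;
}

Anchoring the window at offset_ lets a write that begins far into the stream be buffered without allocating memory for the untouched prefix, which is why the real buffer tracks Start() and End() instead of always starting at zero.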