| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/blockfile/entry_impl.h" | 5 #include "net/disk_cache/blockfile/entry_impl.h" |
| 6 | 6 |
| 7 #include "base/hash.h" | 7 #include "base/hash.h" |
| 8 #include "base/message_loop/message_loop.h" | 8 #include "base/message_loop/message_loop.h" |
| 9 #include "base/metrics/histogram.h" | 9 #include "base/metrics/histogram.h" |
| 10 #include "base/strings/string_util.h" | 10 #include "base/strings/string_util.h" |
| (...skipping 14 matching lines...) |
| 25 using base::TimeDelta; | 25 using base::TimeDelta; |
| 26 using base::TimeTicks; | 26 using base::TimeTicks; |
| 27 | 27 |
| 28 namespace { | 28 namespace { |
| 29 | 29 |
| 30 // Index for the file used to store the key, if any (files_[kKeyFileIndex]). | 30 // Index for the file used to store the key, if any (files_[kKeyFileIndex]). |
| 31 const int kKeyFileIndex = 3; | 31 const int kKeyFileIndex = 3; |
| 32 | 32 |
| 33 // This class implements FileIOCallback to buffer the callback from a file IO | 33 // This class implements FileIOCallback to buffer the callback from a file IO |
| 34 // operation from the actual net class. | 34 // operation from the actual net class. |
| 35 class SyncCallback: public disk_cache::FileIOCallback { | 35 class SyncCallback : public disk_cache::FileIOCallback { |
| 36 public: | 36 public: |
| 37 // |end_event_type| is the event type to log on completion. Logs nothing on | 37 // |end_event_type| is the event type to log on completion. Logs nothing on |
| 38 // discard, or when the NetLog is not set to log all events. | 38 // discard, or when the NetLog is not set to log all events. |
| 39 SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer, | 39 SyncCallback(disk_cache::EntryImpl* entry, |
| | 40 net::IOBuffer* buffer, |
| 40 const net::CompletionCallback& callback, | 41 const net::CompletionCallback& callback, |
| 41 net::NetLog::EventType end_event_type) | 42 net::NetLog::EventType end_event_type) |
| 42 : entry_(entry), callback_(callback), buf_(buffer), | 43 : entry_(entry), |
| 43 start_(TimeTicks::Now()), end_event_type_(end_event_type) { | 44 callback_(callback), |
| | 45 buf_(buffer), |
| | 46 start_(TimeTicks::Now()), |
| | 47 end_event_type_(end_event_type) { |
| 44 entry->AddRef(); | 48 entry->AddRef(); |
| 45 entry->IncrementIoCount(); | 49 entry->IncrementIoCount(); |
| 46 } | 50 } |
| 47 virtual ~SyncCallback() {} | 51 virtual ~SyncCallback() {} |
| 48 | 52 |
| 49 virtual void OnFileIOComplete(int bytes_copied) OVERRIDE; | 53 virtual void OnFileIOComplete(int bytes_copied) OVERRIDE; |
| 50 void Discard(); | 54 void Discard(); |
| 51 | 55 |
| 52 private: | 56 private: |
| 53 disk_cache::EntryImpl* entry_; | 57 disk_cache::EntryImpl* entry_; |
| (...skipping 235 matching lines...) |
| 289 | 293 |
| 290 DVLOG(3) << "Buffer grow to " << required; | 294 DVLOG(3) << "Buffer grow to " << required; |
| 291 | 295 |
| 292 buffer_.reserve(required); | 296 buffer_.reserve(required); |
| 293 return true; | 297 return true; |
| 294 } | 298 } |
| 295 | 299 |
| 296 // ------------------------------------------------------------------------ | 300 // ------------------------------------------------------------------------ |
| 297 | 301 |
| 298 EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only) | 302 EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only) |
| 299 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)), | 303 : entry_(NULL, Addr(0)), |
| 300 backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only), | 304 node_(NULL, Addr(0)), |
| | 305 backend_(backend->GetWeakPtr()), |
| | 306 doomed_(false), |
| | 307 read_only_(read_only), |
| 301 dirty_(false) { | 308 dirty_(false) { |
| 302 entry_.LazyInit(backend->File(address), address); | 309 entry_.LazyInit(backend->File(address), address); |
| 303 for (int i = 0; i < kNumStreams; i++) { | 310 for (int i = 0; i < kNumStreams; i++) { |
| 304 unreported_size_[i] = 0; | 311 unreported_size_[i] = 0; |
| 305 } | 312 } |
| 306 } | 313 } |
| 307 | 314 |
| 308 void EntryImpl::DoomImpl() { | 315 void EntryImpl::DoomImpl() { |
| 309 if (doomed_ || !backend_.get()) | 316 if (doomed_ || !backend_.get()) |
| 310 return; | 317 return; |
| 311 | 318 |
| 312 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); | 319 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); |
| 313 backend_->InternalDoomEntry(this); | 320 backend_->InternalDoomEntry(this); |
| 314 } | 321 } |
| 315 | 322 |
| 316 int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len, | 323 int EntryImpl::ReadDataImpl(int index, |
| | 324 int offset, |
| | 325 IOBuffer* buf, |
| | 326 int buf_len, |
| 317 const CompletionCallback& callback) { | 327 const CompletionCallback& callback) { |
| 318 if (net_log_.IsLogging()) { | 328 if (net_log_.IsLogging()) { |
| 319 net_log_.BeginEvent( | 329 net_log_.BeginEvent( |
| 320 net::NetLog::TYPE_ENTRY_READ_DATA, | 330 net::NetLog::TYPE_ENTRY_READ_DATA, |
| 321 CreateNetLogReadWriteDataCallback(index, offset, buf_len, false)); | 331 CreateNetLogReadWriteDataCallback(index, offset, buf_len, false)); |
| 322 } | 332 } |
| 323 | 333 |
| 324 int result = InternalReadData(index, offset, buf, buf_len, callback); | 334 int result = InternalReadData(index, offset, buf, buf_len, callback); |
| 325 | 335 |
| 326 if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) { | 336 if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) { |
| 327 net_log_.EndEvent( | 337 net_log_.EndEvent(net::NetLog::TYPE_ENTRY_READ_DATA, |
| 328 net::NetLog::TYPE_ENTRY_READ_DATA, | 338 CreateNetLogReadWriteCompleteCallback(result)); |
| 329 CreateNetLogReadWriteCompleteCallback(result)); | |
| 330 } | 339 } |
| 331 return result; | 340 return result; |
| 332 } | 341 } |
| 333 | 342 |
| 334 int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len, | 343 int EntryImpl::WriteDataImpl(int index, |
| | 344 int offset, |
| | 345 IOBuffer* buf, |
| | 346 int buf_len, |
| 335 const CompletionCallback& callback, | 347 const CompletionCallback& callback, |
| 336 bool truncate) { | 348 bool truncate) { |
| 337 if (net_log_.IsLogging()) { | 349 if (net_log_.IsLogging()) { |
| 338 net_log_.BeginEvent( | 350 net_log_.BeginEvent( |
| 339 net::NetLog::TYPE_ENTRY_WRITE_DATA, | 351 net::NetLog::TYPE_ENTRY_WRITE_DATA, |
| 340 CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate)); | 352 CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate)); |
| 341 } | 353 } |
| 342 | 354 |
| 343 int result = InternalWriteData(index, offset, buf, buf_len, callback, | 355 int result = |
| 344 truncate); | 356 InternalWriteData(index, offset, buf, buf_len, callback, truncate); |
| 345 | 357 |
| 346 if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) { | 358 if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) { |
| 347 net_log_.EndEvent( | 359 net_log_.EndEvent(net::NetLog::TYPE_ENTRY_WRITE_DATA, |
| 348 net::NetLog::TYPE_ENTRY_WRITE_DATA, | 360 CreateNetLogReadWriteCompleteCallback(result)); |
| 349 CreateNetLogReadWriteCompleteCallback(result)); | |
| 350 } | 361 } |
| 351 return result; | 362 return result; |
| 352 } | 363 } |
| 353 | 364 |
| 354 int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len, | 365 int EntryImpl::ReadSparseDataImpl(int64 offset, |
| | 366 IOBuffer* buf, |
| | 367 int buf_len, |
| 355 const CompletionCallback& callback) { | 368 const CompletionCallback& callback) { |
| 356 DCHECK(node_.Data()->dirty || read_only_); | 369 DCHECK(node_.Data()->dirty || read_only_); |
| 357 int result = InitSparseData(); | 370 int result = InitSparseData(); |
| 358 if (net::OK != result) | 371 if (net::OK != result) |
| 359 return result; | 372 return result; |
| 360 | 373 |
| 361 TimeTicks start = TimeTicks::Now(); | 374 TimeTicks start = TimeTicks::Now(); |
| 362 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, | 375 result = sparse_->StartIO( |
| 363 callback); | 376 SparseControl::kReadOperation, offset, buf, buf_len, callback); |
| 364 ReportIOTime(kSparseRead, start); | 377 ReportIOTime(kSparseRead, start); |
| 365 return result; | 378 return result; |
| 366 } | 379 } |
| 367 | 380 |
| 368 int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len, | 381 int EntryImpl::WriteSparseDataImpl(int64 offset, |
| | 382 IOBuffer* buf, |
| | 383 int buf_len, |
| 369 const CompletionCallback& callback) { | 384 const CompletionCallback& callback) { |
| 370 DCHECK(node_.Data()->dirty || read_only_); | 385 DCHECK(node_.Data()->dirty || read_only_); |
| 371 int result = InitSparseData(); | 386 int result = InitSparseData(); |
| 372 if (net::OK != result) | 387 if (net::OK != result) |
| 373 return result; | 388 return result; |
| 374 | 389 |
| 375 TimeTicks start = TimeTicks::Now(); | 390 TimeTicks start = TimeTicks::Now(); |
| 376 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, | 391 result = sparse_->StartIO( |
| 377 buf_len, callback); | 392 SparseControl::kWriteOperation, offset, buf, buf_len, callback); |
| 378 ReportIOTime(kSparseWrite, start); | 393 ReportIOTime(kSparseWrite, start); |
| 379 return result; | 394 return result; |
| 380 } | 395 } |
| 381 | 396 |
| 382 int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) { | 397 int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) { |
| 383 int result = InitSparseData(); | 398 int result = InitSparseData(); |
| 384 if (net::OK != result) | 399 if (net::OK != result) |
| 385 return result; | 400 return result; |
| 386 | 401 |
| 387 return sparse_->GetAvailableRange(offset, len, start); | 402 return sparse_->GetAvailableRange(offset, len, start); |
| 388 } | 403 } |
| 389 | 404 |
| 390 void EntryImpl::CancelSparseIOImpl() { | 405 void EntryImpl::CancelSparseIOImpl() { |
| 391 if (!sparse_.get()) | 406 if (!sparse_.get()) |
| 392 return; | 407 return; |
| 393 | 408 |
| 394 sparse_->CancelIO(); | 409 sparse_->CancelIO(); |
| 395 } | 410 } |
| 396 | 411 |
| 397 int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) { | 412 int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) { |
| 398 DCHECK(sparse_.get()); | 413 DCHECK(sparse_.get()); |
| 399 return sparse_->ReadyToUse(callback); | 414 return sparse_->ReadyToUse(callback); |
| 400 } | 415 } |
| 401 | 416 |
| 402 uint32 EntryImpl::GetHash() { | 417 uint32 EntryImpl::GetHash() { |
| 403 return entry_.Data()->hash; | 418 return entry_.Data()->hash; |
| 404 } | 419 } |
| 405 | 420 |
| 406 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, | 421 bool EntryImpl::CreateEntry(Addr node_address, |
| | 422 const std::string& key, |
| 407 uint32 hash) { | 423 uint32 hash) { |
| 408 Trace("Create entry In"); | 424 Trace("Create entry In"); |
| 409 EntryStore* entry_store = entry_.Data(); | 425 EntryStore* entry_store = entry_.Data(); |
| 410 RankingsNode* node = node_.Data(); | 426 RankingsNode* node = node_.Data(); |
| 411 memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks()); | 427 memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks()); |
| 412 memset(node, 0, sizeof(RankingsNode)); | 428 memset(node, 0, sizeof(RankingsNode)); |
| 413 if (!node_.LazyInit(backend_->File(node_address), node_address)) | 429 if (!node_.LazyInit(backend_->File(node_address), node_address)) |
| 414 return false; | 430 return false; |
| 415 | 431 |
| 416 entry_store->rankings_node = node_address.value(); | 432 entry_store->rankings_node = node_address.value(); |
| (...skipping 59 matching lines...) |
| 476 SparseControl::DeleteChildren(this); | 492 SparseControl::DeleteChildren(this); |
| 477 } | 493 } |
| 478 | 494 |
| 479 if (GetDataSize(0)) | 495 if (GetDataSize(0)) |
| 480 CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0)); | 496 CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0)); |
| 481 if (GetDataSize(1)) | 497 if (GetDataSize(1)) |
| 482 CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1)); | 498 CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1)); |
| 483 for (int index = 0; index < kNumStreams; index++) { | 499 for (int index = 0; index < kNumStreams; index++) { |
| 484 Addr address(entry_.Data()->data_addr[index]); | 500 Addr address(entry_.Data()->data_addr[index]); |
| 485 if (address.is_initialized()) { | 501 if (address.is_initialized()) { |
| 486 backend_->ModifyStorageSize(entry_.Data()->data_size[index] - | 502 backend_->ModifyStorageSize( |
| 487 unreported_size_[index], 0); | 503 entry_.Data()->data_size[index] - unreported_size_[index], 0); |
| 488 entry_.Data()->data_addr[index] = 0; | 504 entry_.Data()->data_addr[index] = 0; |
| 489 entry_.Data()->data_size[index] = 0; | 505 entry_.Data()->data_size[index] = 0; |
| 490 entry_.Store(); | 506 entry_.Store(); |
| 491 DeleteData(address, index); | 507 DeleteData(address, index); |
| 492 } | 508 } |
| 493 } | 509 } |
| 494 | 510 |
| 495 if (!everything) | 511 if (!everything) |
| 496 return; | 512 return; |
| 497 | 513 |
| (...skipping 219 matching lines...) |
| 717 case kWriteAsync1: | 733 case kWriteAsync1: |
| 718 CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start); | 734 CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start); |
| 719 break; | 735 break; |
| 720 default: | 736 default: |
| 721 NOTREACHED(); | 737 NOTREACHED(); |
| 722 } | 738 } |
| 723 } | 739 } |
| 724 | 740 |
| 725 void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) { | 741 void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) { |
| 726 DCHECK(!net_log_.net_log()); | 742 DCHECK(!net_log_.net_log()); |
| 727 net_log_ = net::BoundNetLog::Make( | 743 net_log_ = |
| 728 net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY); | 744 net::BoundNetLog::Make(net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY); |
| 729 net_log_.BeginEvent( | 745 net_log_.BeginEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL, |
| 730 net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL, | 746 CreateNetLogEntryCreationCallback(this, created)); |
| 731 CreateNetLogEntryCreationCallback(this, created)); | |
| 732 } | 747 } |
| 733 | 748 |
| 734 const net::BoundNetLog& EntryImpl::net_log() const { | 749 const net::BoundNetLog& EntryImpl::net_log() const { |
| 735 return net_log_; | 750 return net_log_; |
| 736 } | 751 } |
| 737 | 752 |
| 738 // static | 753 // static |
| 739 int EntryImpl::NumBlocksForEntry(int key_size) { | 754 int EntryImpl::NumBlocksForEntry(int key_size) { |
| 740 // The longest key that can be stored using one block. | 755 // The longest key that can be stored using one block. |
| 741 int key1_len = | 756 int key1_len = |
| (...skipping 28 matching lines...) |
| 770 if (!key_.empty()) | 785 if (!key_.empty()) |
| 771 return key_; | 786 return key_; |
| 772 | 787 |
| 773 Addr address(entry->Data()->long_key); | 788 Addr address(entry->Data()->long_key); |
| 774 DCHECK(address.is_initialized()); | 789 DCHECK(address.is_initialized()); |
| 775 size_t offset = 0; | 790 size_t offset = 0; |
| 776 if (address.is_block_file()) | 791 if (address.is_block_file()) |
| 777 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; | 792 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; |
| 778 | 793 |
| 779 COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); | 794 COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index); |
| 780 File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address, | 795 File* key_file = |
| 781 kKeyFileIndex); | 796 const_cast<EntryImpl*>(this)->GetBackingFile(address, kKeyFileIndex); |
| 782 if (!key_file) | 797 if (!key_file) |
| 783 return std::string(); | 798 return std::string(); |
| 784 | 799 |
| 785 ++key_len; // We store a trailing \0 on disk that we read back below. | 800 ++key_len; // We store a trailing \0 on disk that we read back below. |
| 786 if (!offset && key_file->GetLength() != static_cast<size_t>(key_len)) | 801 if (!offset && key_file->GetLength() != static_cast<size_t>(key_len)) |
| 787 return std::string(); | 802 return std::string(); |
| 788 | 803 |
| 789 if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset)) | 804 if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset)) |
| 790 key_.clear(); | 805 key_.clear(); |
| 791 return key_; | 806 return key_; |
| (...skipping 10 matching lines...) |
| 802 } | 817 } |
| 803 | 818 |
| 804 int32 EntryImpl::GetDataSize(int index) const { | 819 int32 EntryImpl::GetDataSize(int index) const { |
| 805 if (index < 0 || index >= kNumStreams) | 820 if (index < 0 || index >= kNumStreams) |
| 806 return 0; | 821 return 0; |
| 807 | 822 |
| 808 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 823 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 809 return entry->Data()->data_size[index]; | 824 return entry->Data()->data_size[index]; |
| 810 } | 825 } |
| 811 | 826 |
| 812 int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len, | 827 int EntryImpl::ReadData(int index, |
| | 828 int offset, |
| | 829 IOBuffer* buf, |
| | 830 int buf_len, |
| 813 const CompletionCallback& callback) { | 831 const CompletionCallback& callback) { |
| 814 if (callback.is_null()) | 832 if (callback.is_null()) |
| 815 return ReadDataImpl(index, offset, buf, buf_len, callback); | 833 return ReadDataImpl(index, offset, buf, buf_len, callback); |
| 816 | 834 |
| 817 DCHECK(node_.Data()->dirty || read_only_); | 835 DCHECK(node_.Data()->dirty || read_only_); |
| 818 if (index < 0 || index >= kNumStreams) | 836 if (index < 0 || index >= kNumStreams) |
| 819 return net::ERR_INVALID_ARGUMENT; | 837 return net::ERR_INVALID_ARGUMENT; |
| 820 | 838 |
| 821 int entry_size = entry_.Data()->data_size[index]; | 839 int entry_size = entry_.Data()->data_size[index]; |
| 822 if (offset >= entry_size || offset < 0 || !buf_len) | 840 if (offset >= entry_size || offset < 0 || !buf_len) |
| 823 return 0; | 841 return 0; |
| 824 | 842 |
| 825 if (buf_len < 0) | 843 if (buf_len < 0) |
| 826 return net::ERR_INVALID_ARGUMENT; | 844 return net::ERR_INVALID_ARGUMENT; |
| 827 | 845 |
| 828 if (!background_queue_.get()) | 846 if (!background_queue_.get()) |
| 829 return net::ERR_UNEXPECTED; | 847 return net::ERR_UNEXPECTED; |
| 830 | 848 |
| 831 background_queue_->ReadData(this, index, offset, buf, buf_len, callback); | 849 background_queue_->ReadData(this, index, offset, buf, buf_len, callback); |
| 832 return net::ERR_IO_PENDING; | 850 return net::ERR_IO_PENDING; |
| 833 } | 851 } |
| 834 | 852 |
| 835 int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len, | 853 int EntryImpl::WriteData(int index, |
| 836 const CompletionCallback& callback, bool truncate) { | 854 int offset, |
| | 855 IOBuffer* buf, |
| | 856 int buf_len, |
| | 857 const CompletionCallback& callback, |
| | 858 bool truncate) { |
| 837 if (callback.is_null()) | 859 if (callback.is_null()) |
| 838 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate); | 860 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate); |
| 839 | 861 |
| 840 DCHECK(node_.Data()->dirty || read_only_); | 862 DCHECK(node_.Data()->dirty || read_only_); |
| 841 if (index < 0 || index >= kNumStreams) | 863 if (index < 0 || index >= kNumStreams) |
| 842 return net::ERR_INVALID_ARGUMENT; | 864 return net::ERR_INVALID_ARGUMENT; |
| 843 | 865 |
| 844 if (offset < 0 || buf_len < 0) | 866 if (offset < 0 || buf_len < 0) |
| 845 return net::ERR_INVALID_ARGUMENT; | 867 return net::ERR_INVALID_ARGUMENT; |
| 846 | 868 |
| 847 if (!background_queue_.get()) | 869 if (!background_queue_.get()) |
| 848 return net::ERR_UNEXPECTED; | 870 return net::ERR_UNEXPECTED; |
| 849 | 871 |
| 850 background_queue_->WriteData(this, index, offset, buf, buf_len, truncate, | 872 background_queue_->WriteData( |
| 851 callback); | 873 this, index, offset, buf, buf_len, truncate, callback); |
| 852 return net::ERR_IO_PENDING; | 874 return net::ERR_IO_PENDING; |
| 853 } | 875 } |
| 854 | 876 |
| 855 int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len, | 877 int EntryImpl::ReadSparseData(int64 offset, |
| | 878 IOBuffer* buf, |
| | 879 int buf_len, |
| 856 const CompletionCallback& callback) { | 880 const CompletionCallback& callback) { |
| 857 if (callback.is_null()) | 881 if (callback.is_null()) |
| 858 return ReadSparseDataImpl(offset, buf, buf_len, callback); | 882 return ReadSparseDataImpl(offset, buf, buf_len, callback); |
| 859 | 883 |
| 860 if (!background_queue_.get()) | 884 if (!background_queue_.get()) |
| 861 return net::ERR_UNEXPECTED; | 885 return net::ERR_UNEXPECTED; |
| 862 | 886 |
| 863 background_queue_->ReadSparseData(this, offset, buf, buf_len, callback); | 887 background_queue_->ReadSparseData(this, offset, buf, buf_len, callback); |
| 864 return net::ERR_IO_PENDING; | 888 return net::ERR_IO_PENDING; |
| 865 } | 889 } |
| 866 | 890 |
| 867 int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len, | 891 int EntryImpl::WriteSparseData(int64 offset, |
| | 892 IOBuffer* buf, |
| | 893 int buf_len, |
| 868 const CompletionCallback& callback) { | 894 const CompletionCallback& callback) { |
| 869 if (callback.is_null()) | 895 if (callback.is_null()) |
| 870 return WriteSparseDataImpl(offset, buf, buf_len, callback); | 896 return WriteSparseDataImpl(offset, buf, buf_len, callback); |
| 871 | 897 |
| 872 if (!background_queue_.get()) | 898 if (!background_queue_.get()) |
| 873 return net::ERR_UNEXPECTED; | 899 return net::ERR_UNEXPECTED; |
| 874 | 900 |
| 875 background_queue_->WriteSparseData(this, offset, buf, buf_len, callback); | 901 background_queue_->WriteSparseData(this, offset, buf, buf_len, callback); |
| 876 return net::ERR_IO_PENDING; | 902 return net::ERR_IO_PENDING; |
| 877 } | 903 } |
| 878 | 904 |
| 879 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, | 905 int EntryImpl::GetAvailableRange(int64 offset, |
| | 906 int len, |
| | 907 int64* start, |
| 880 const CompletionCallback& callback) { | 908 const CompletionCallback& callback) { |
| 881 if (!background_queue_.get()) | 909 if (!background_queue_.get()) |
| 882 return net::ERR_UNEXPECTED; | 910 return net::ERR_UNEXPECTED; |
| 883 | 911 |
| 884 background_queue_->GetAvailableRange(this, offset, len, start, callback); | 912 background_queue_->GetAvailableRange(this, offset, len, start, callback); |
| 885 return net::ERR_IO_PENDING; | 913 return net::ERR_IO_PENDING; |
| 886 } | 914 } |
| 887 | 915 |
| 888 bool EntryImpl::CouldBeSparse() const { | 916 bool EntryImpl::CouldBeSparse() const { |
| 889 if (sparse_.get()) | 917 if (sparse_.get()) |
| (...skipping 73 matching lines...) |
| 963 } | 991 } |
| 964 } | 992 } |
| 965 | 993 |
| 966 Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this)); | 994 Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this)); |
| 967 net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL); | 995 net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL); |
| 968 backend_->OnEntryDestroyEnd(); | 996 backend_->OnEntryDestroyEnd(); |
| 969 } | 997 } |
| 970 | 998 |
| 971 // ------------------------------------------------------------------------ | 999 // ------------------------------------------------------------------------ |
| 972 | 1000 |
| 973 int EntryImpl::InternalReadData(int index, int offset, | 1001 int EntryImpl::InternalReadData(int index, |
| 974 IOBuffer* buf, int buf_len, | 1002 int offset, |
| | 1003 IOBuffer* buf, |
| | 1004 int buf_len, |
| 975 const CompletionCallback& callback) { | 1005 const CompletionCallback& callback) { |
| 976 DCHECK(node_.Data()->dirty || read_only_); | 1006 DCHECK(node_.Data()->dirty || read_only_); |
| 977 DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len; | 1007 DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len; |
| 978 if (index < 0 || index >= kNumStreams) | 1008 if (index < 0 || index >= kNumStreams) |
| 979 return net::ERR_INVALID_ARGUMENT; | 1009 return net::ERR_INVALID_ARGUMENT; |
| 980 | 1010 |
| 981 int entry_size = entry_.Data()->data_size[index]; | 1011 int entry_size = entry_.Data()->data_size[index]; |
| 982 if (offset >= entry_size || offset < 0 || !buf_len) | 1012 if (offset >= entry_size || offset < 0 || !buf_len) |
| 983 return 0; | 1013 return 0; |
| 984 | 1014 |
| (...skipping 33 matching lines...) |
| 1018 File* file = GetBackingFile(address, index); | 1048 File* file = GetBackingFile(address, index); |
| 1019 if (!file) { | 1049 if (!file) { |
| 1020 DoomImpl(); | 1050 DoomImpl(); |
| 1021 LOG(ERROR) << "No file for " << std::hex << address.value(); | 1051 LOG(ERROR) << "No file for " << std::hex << address.value(); |
| 1022 return net::ERR_FILE_NOT_FOUND; | 1052 return net::ERR_FILE_NOT_FOUND; |
| 1023 } | 1053 } |
| 1024 | 1054 |
| 1025 size_t file_offset = offset; | 1055 size_t file_offset = offset; |
| 1026 if (address.is_block_file()) { | 1056 if (address.is_block_file()) { |
| 1027 DCHECK_LE(offset + buf_len, kMaxBlockSize); | 1057 DCHECK_LE(offset + buf_len, kMaxBlockSize); |
| 1028 file_offset += address.start_block() * address.BlockSize() + | 1058 file_offset += |
| 1029 kBlockHeaderSize; | 1059 address.start_block() * address.BlockSize() + kBlockHeaderSize; |
| 1030 } | 1060 } |
| 1031 | 1061 |
| 1032 SyncCallback* io_callback = NULL; | 1062 SyncCallback* io_callback = NULL; |
| 1033 if (!callback.is_null()) { | 1063 if (!callback.is_null()) { |
| 1034 io_callback = new SyncCallback(this, buf, callback, | 1064 io_callback = new SyncCallback( |
| 1035 net::NetLog::TYPE_ENTRY_READ_DATA); | 1065 this, buf, callback, net::NetLog::TYPE_ENTRY_READ_DATA); |
| 1036 } | 1066 } |
| 1037 | 1067 |
| 1038 TimeTicks start_async = TimeTicks::Now(); | 1068 TimeTicks start_async = TimeTicks::Now(); |
| 1039 | 1069 |
| 1040 bool completed; | 1070 bool completed; |
| 1041 if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) { | 1071 if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) { |
| 1042 if (io_callback) | 1072 if (io_callback) |
| 1043 io_callback->Discard(); | 1073 io_callback->Discard(); |
| 1044 DoomImpl(); | 1074 DoomImpl(); |
| 1045 return net::ERR_CACHE_READ_FAILURE; | 1075 return net::ERR_CACHE_READ_FAILURE; |
| 1046 } | 1076 } |
| 1047 | 1077 |
| 1048 if (io_callback && completed) | 1078 if (io_callback && completed) |
| 1049 io_callback->Discard(); | 1079 io_callback->Discard(); |
| 1050 | 1080 |
| 1051 if (io_callback) | 1081 if (io_callback) |
| 1052 ReportIOTime(kReadAsync1, start_async); | 1082 ReportIOTime(kReadAsync1, start_async); |
| 1053 | 1083 |
| 1054 ReportIOTime(kRead, start); | 1084 ReportIOTime(kRead, start); |
| 1055 return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING; | 1085 return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING; |
| 1056 } | 1086 } |
| 1057 | 1087 |
| 1058 int EntryImpl::InternalWriteData(int index, int offset, | 1088 int EntryImpl::InternalWriteData(int index, |
| 1059 IOBuffer* buf, int buf_len, | 1089 int offset, |
| | 1090 IOBuffer* buf, |
| | 1091 int buf_len, |
| 1060 const CompletionCallback& callback, | 1092 const CompletionCallback& callback, |
| 1061 bool truncate) { | 1093 bool truncate) { |
| 1062 DCHECK(node_.Data()->dirty || read_only_); | 1094 DCHECK(node_.Data()->dirty || read_only_); |
| 1063 DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len; | 1095 DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len; |
| 1064 if (index < 0 || index >= kNumStreams) | 1096 if (index < 0 || index >= kNumStreams) |
| 1065 return net::ERR_INVALID_ARGUMENT; | 1097 return net::ERR_INVALID_ARGUMENT; |
| 1066 | 1098 |
| 1067 if (offset < 0 || buf_len < 0) | 1099 if (offset < 0 || buf_len < 0) |
| 1068 return net::ERR_INVALID_ARGUMENT; | 1100 return net::ERR_INVALID_ARGUMENT; |
| 1069 | 1101 |
| (...skipping 46 matching lines...) |
| 1116 return 0; | 1148 return 0; |
| 1117 } | 1149 } |
| 1118 | 1150 |
| 1119 File* file = GetBackingFile(address, index); | 1151 File* file = GetBackingFile(address, index); |
| 1120 if (!file) | 1152 if (!file) |
| 1121 return net::ERR_FILE_NOT_FOUND; | 1153 return net::ERR_FILE_NOT_FOUND; |
| 1122 | 1154 |
| 1123 size_t file_offset = offset; | 1155 size_t file_offset = offset; |
| 1124 if (address.is_block_file()) { | 1156 if (address.is_block_file()) { |
| 1125 DCHECK_LE(offset + buf_len, kMaxBlockSize); | 1157 DCHECK_LE(offset + buf_len, kMaxBlockSize); |
| 1126 file_offset += address.start_block() * address.BlockSize() + | 1158 file_offset += |
| 1127 kBlockHeaderSize; | 1159 address.start_block() * address.BlockSize() + kBlockHeaderSize; |
| 1128 } else if (truncate || (extending && !buf_len)) { | 1160 } else if (truncate || (extending && !buf_len)) { |
| 1129 if (!file->SetLength(offset + buf_len)) | 1161 if (!file->SetLength(offset + buf_len)) |
| 1130 return net::ERR_FAILED; | 1162 return net::ERR_FAILED; |
| 1131 } | 1163 } |
| 1132 | 1164 |
| 1133 if (!buf_len) | 1165 if (!buf_len) |
| 1134 return 0; | 1166 return 0; |
| 1135 | 1167 |
| 1136 SyncCallback* io_callback = NULL; | 1168 SyncCallback* io_callback = NULL; |
| 1137 if (!callback.is_null()) { | 1169 if (!callback.is_null()) { |
| 1138 io_callback = new SyncCallback(this, buf, callback, | 1170 io_callback = new SyncCallback( |
| 1139 net::NetLog::TYPE_ENTRY_WRITE_DATA); | 1171 this, buf, callback, net::NetLog::TYPE_ENTRY_WRITE_DATA); |
| 1140 } | 1172 } |
| 1141 | 1173 |
| 1142 TimeTicks start_async = TimeTicks::Now(); | 1174 TimeTicks start_async = TimeTicks::Now(); |
| 1143 | 1175 |
| 1144 bool completed; | 1176 bool completed; |
| 1145 if (!file->Write(buf->data(), buf_len, file_offset, io_callback, | 1177 if (!file->Write( |
| 1146 &completed)) { | 1178 buf->data(), buf_len, file_offset, io_callback, &completed)) { |
| 1147 if (io_callback) | 1179 if (io_callback) |
| 1148 io_callback->Discard(); | 1180 io_callback->Discard(); |
| 1149 return net::ERR_CACHE_WRITE_FAILURE; | 1181 return net::ERR_CACHE_WRITE_FAILURE; |
| 1150 } | 1182 } |
| 1151 | 1183 |
| 1152 if (io_callback && completed) | 1184 if (io_callback && completed) |
| 1153 io_callback->Discard(); | 1185 io_callback->Discard(); |
| 1154 | 1186 |
| 1155 if (io_callback) | 1187 if (io_callback) |
| 1156 ReportIOTime(kWriteAsync1, start_async); | 1188 ReportIOTime(kWriteAsync1, start_async); |
| (...skipping 43 matching lines...) |
| 1200 // important that the entry doesn't keep a reference to this address, or we'll | 1232 // important that the entry doesn't keep a reference to this address, or we'll |
| 1201 // end up deleting the contents of |address| once again. | 1233 // end up deleting the contents of |address| once again. |
| 1202 void EntryImpl::DeleteData(Addr address, int index) { | 1234 void EntryImpl::DeleteData(Addr address, int index) { |
| 1203 DCHECK(backend_.get()); | 1235 DCHECK(backend_.get()); |
| 1204 if (!address.is_initialized()) | 1236 if (!address.is_initialized()) |
| 1205 return; | 1237 return; |
| 1206 if (address.is_separate_file()) { | 1238 if (address.is_separate_file()) { |
| 1207 int failure = !DeleteCacheFile(backend_->GetFileName(address)); | 1239 int failure = !DeleteCacheFile(backend_->GetFileName(address)); |
| 1208 CACHE_UMA(COUNTS, "DeleteFailed", 0, failure); | 1240 CACHE_UMA(COUNTS, "DeleteFailed", 0, failure); |
| 1209 if (failure) { | 1241 if (failure) { |
| 1210 LOG(ERROR) << "Failed to delete " << | 1242 LOG(ERROR) << "Failed to delete " << backend_->GetFileName(address) |
| 1211 backend_->GetFileName(address).value() << " from the cache."; | 1243 .value() << " from the cache."; |
| 1212 } | 1244 } |
| 1213 if (files_[index].get()) | 1245 if (files_[index].get()) |
| 1214 files_[index] = NULL; // Releases the object. | 1246 files_[index] = NULL; // Releases the object. |
| 1215 } else { | 1247 } else { |
| 1216 backend_->DeleteBlock(address, true); | 1248 backend_->DeleteBlock(address, true); |
| 1217 } | 1249 } |
| 1218 } | 1250 } |
| 1219 | 1251 |
| 1220 void EntryImpl::UpdateRank(bool modified) { | 1252 void EntryImpl::UpdateRank(bool modified) { |
| 1221 if (!backend_.get()) | 1253 if (!backend_.get()) |
| (...skipping 41 matching lines...) |
| 1263 // buffer and / or the files needed to store the data. | 1295 // buffer and / or the files needed to store the data. |
| 1264 // | 1296 // |
| 1265 // In general, a buffer may overlap data already stored on disk, and in that | 1297 // In general, a buffer may overlap data already stored on disk, and in that |
| 1266 // case, the contents of the buffer are the most accurate. It may also extend | 1298 // case, the contents of the buffer are the most accurate. It may also extend |
| 1267 // the file, but we don't want to read from disk just to keep the buffer up to | 1299 // the file, but we don't want to read from disk just to keep the buffer up to |
| 1268 // date. This means that as soon as there is a chance to get confused about what | 1300 // date. This means that as soon as there is a chance to get confused about what |
| 1269 // is the most recent version of some part of a file, we'll flush the buffer and | 1301 // is the most recent version of some part of a file, we'll flush the buffer and |
| 1270 // reuse it for the new data. Keep in mind that the normal use pattern is quite | 1302 // reuse it for the new data. Keep in mind that the normal use pattern is quite |
| 1271 // simple (write sequentially from the beginning), so we optimize for handling | 1303 // simple (write sequentially from the beginning), so we optimize for handling |
| 1272 // that case. | 1304 // that case. |
| 1273 bool EntryImpl::PrepareTarget(int index, int offset, int buf_len, | 1305 bool EntryImpl::PrepareTarget(int index, |
| 1306 int offset, |
| 1307 int buf_len, |
| 1274 bool truncate) { | 1308 bool truncate) { |
| 1275 if (truncate) | 1309 if (truncate) |
| 1276 return HandleTruncation(index, offset, buf_len); | 1310 return HandleTruncation(index, offset, buf_len); |
| 1277 | 1311 |
| 1278 if (!offset && !buf_len) | 1312 if (!offset && !buf_len) |
| 1279 return true; | 1313 return true; |
| 1280 | 1314 |
| 1281 Addr address(entry_.Data()->data_addr[index]); | 1315 Addr address(entry_.Data()->data_addr[index]); |
| 1282 if (address.is_initialized()) { | 1316 if (address.is_initialized()) { |
| 1283 if (address.is_block_file() && !MoveToLocalBuffer(index)) | 1317 if (address.is_block_file() && !MoveToLocalBuffer(index)) |
| (...skipping 237 matching lines...) |
| 1521 return; | 1555 return; |
| 1522 } | 1556 } |
| 1523 } | 1557 } |
| 1524 | 1558 |
| 1525 // Bad news: we'd have to read the info from disk so instead we'll just tell | 1559 // Bad news: we'd have to read the info from disk so instead we'll just tell |
| 1526 // the caller where to read from. | 1560 // the caller where to read from. |
| 1527 *buffer = NULL; | 1561 *buffer = NULL; |
| 1528 address->set_value(entry_.Data()->data_addr[index]); | 1562 address->set_value(entry_.Data()->data_addr[index]); |
| 1529 if (address->is_initialized()) { | 1563 if (address->is_initialized()) { |
| 1530 // Prevent us from deleting the block from the backing store. | 1564 // Prevent us from deleting the block from the backing store. |
| 1531 backend_->ModifyStorageSize(entry_.Data()->data_size[index] - | 1565 backend_->ModifyStorageSize( |
| 1532 unreported_size_[index], 0); | 1566 entry_.Data()->data_size[index] - unreported_size_[index], 0); |
| 1533 entry_.Data()->data_addr[index] = 0; | 1567 entry_.Data()->data_addr[index] = 0; |
| 1534 entry_.Data()->data_size[index] = 0; | 1568 entry_.Data()->data_size[index] = 0; |
| 1535 } | 1569 } |
| 1536 } | 1570 } |
| 1537 | 1571 |
| 1538 void EntryImpl::Log(const char* msg) { | 1572 void EntryImpl::Log(const char* msg) { |
| 1539 int dirty = 0; | 1573 int dirty = 0; |
| 1540 if (node_.HasData()) { | 1574 if (node_.HasData()) { |
| 1541 dirty = node_.Data()->dirty; | 1575 dirty = node_.Data()->dirty; |
| 1542 } | 1576 } |
| 1543 | 1577 |
| 1544 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), | 1578 Trace("%s 0x%p 0x%x 0x%x", |
| 1545 entry_.address().value(), node_.address().value()); | 1579 msg, |
| | 1580 reinterpret_cast<void*>(this), |
| | 1581 entry_.address().value(), |
| | 1582 node_.address().value()); |
| 1546 | 1583 |
| 1547 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], | 1584 Trace(" data: 0x%x 0x%x 0x%x", |
| 1548 entry_.Data()->data_addr[1], entry_.Data()->long_key); | 1585 entry_.Data()->data_addr[0], |
| | 1586 entry_.Data()->data_addr[1], |
| | 1587 entry_.Data()->long_key); |
| 1549 | 1588 |
| 1550 Trace(" doomed: %d 0x%x", doomed_, dirty); | 1589 Trace(" doomed: %d 0x%x", doomed_, dirty); |
| 1551 } | 1590 } |
| 1552 | 1591 |
| 1553 } // namespace disk_cache | 1592 } // namespace disk_cache |
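
The SyncCallback class at the top of this diff wraps a file I/O completion so the entry stays alive (AddRef plus an I/O count) until the operation reports back, and Discard() covers the case where the result is already known and the user callback must not run. As a rough illustration of that ownership pattern only, here is a self-contained sketch with invented stand-ins (FakeEntry, SyncCallbackSketch, a std::function-based CompletionCallback); it is not the disk_cache implementation.

```cpp
// Standalone sketch of the SyncCallback ownership pattern (invented names,
// not Chromium code): the adapter pins its target, forwards the byte count
// once, and deletes itself on either the completion or the discard path.
#include <cstdio>
#include <functional>
#include <utility>

using CompletionCallback = std::function<void(int)>;

// Stand-in for the interface the file layer invokes when async I/O finishes.
class FileIOCallback {
 public:
  virtual ~FileIOCallback() {}
  virtual void OnFileIOComplete(int bytes_copied) = 0;
};

// Stand-in for EntryImpl: just enough ref counting and an in-flight I/O
// counter to show what the adapter pins and releases.
class FakeEntry {
 public:
  void AddRef() { ++refs_; }
  void Release() {
    // Simulates the final release; a heap-allocated entry would free itself.
    if (--refs_ == 0) std::printf("last reference released\n");
  }
  void IncrementIoCount() { ++io_count_; }
  void DecrementIoCount() { --io_count_; }

 private:
  int refs_ = 1;
  int io_count_ = 0;
};

class SyncCallbackSketch : public FileIOCallback {
 public:
  SyncCallbackSketch(FakeEntry* entry, CompletionCallback callback)
      : entry_(entry), callback_(std::move(callback)) {
    entry_->AddRef();            // Keep the entry alive during the I/O.
    entry_->IncrementIoCount();
  }
  void OnFileIOComplete(int bytes_copied) override {
    entry_->DecrementIoCount();
    if (callback_) callback_(bytes_copied);  // Skipped after Discard().
    entry_->Release();
    delete this;                 // The adapter owns itself.
  }
  // Used when the operation completed synchronously: undo the bookkeeping
  // without ever running the user's callback.
  void Discard() {
    callback_ = nullptr;
    OnFileIOComplete(0);
  }

 private:
  FakeEntry* entry_;
  CompletionCallback callback_;
};

int main() {
  FakeEntry entry;
  // Asynchronous path: the file layer reports completion later.
  auto* pending = new SyncCallbackSketch(&entry, [](int n) {
    std::printf("async read finished: %d bytes\n", n);
  });
  pending->OnFileIOComplete(1024);
  // Synchronous path: the speculatively created adapter is discarded.
  auto* unused = new SyncCallbackSketch(&entry, [](int) {});
  unused->Discard();
  entry.Release();
  return 0;
}
```

The point of the pattern is that the adapter, not the caller, owns the bookkeeping: whichever path fires, the I/O count and the reference are released exactly once.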
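The comment above EntryImpl::PrepareTarget (old lines 1263-1272 in the diff) explains the user-buffer policy: buffered data is the freshest copy, the buffer may extend the file, and the code flushes and reuses the buffer as soon as there is any chance of confusion about which copy of a byte range is newest. The toy write-back buffer below illustrates that flush-on-ambiguity rule over an in-memory "file"; it is a sketch with invented names (BufferedStream, Flush), not the cache's actual buffering code.

```cpp
// Toy write-back buffer demonstrating the flush-on-ambiguity policy
// described above (all names invented for illustration).
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

class BufferedStream {
 public:
  // Writes that touch or extend the buffered window contiguously stay in
  // memory; anything that could make "which copy is newest?" ambiguous (a
  // gap, or data before the window) is preceded by a flush, after which the
  // buffer restarts at the new offset.
  void Write(std::size_t offset, const std::string& data) {
    const std::size_t buf_end = buf_offset_ + buffer_.size();
    const bool contiguous =
        buffer_.empty() || (offset >= buf_offset_ && offset <= buf_end);
    if (!contiguous) {
      Flush();
    }
    if (buffer_.empty()) buf_offset_ = offset;
    const std::size_t rel = offset - buf_offset_;
    if (rel + data.size() > buffer_.size()) buffer_.resize(rel + data.size());
    std::copy(data.begin(), data.end(), buffer_.begin() + rel);
  }

  // Pushes the buffered window out to the backing "file".
  void Flush() {
    if (buffer_.empty()) return;
    if (file_.size() < buf_offset_ + buffer_.size())
      file_.resize(buf_offset_ + buffer_.size());
    std::copy(buffer_.begin(), buffer_.end(), file_.begin() + buf_offset_);
    std::printf("flushed %zu bytes at offset %zu\n", buffer_.size(),
                buf_offset_);
    buffer_.clear();
  }

  std::string FileContents() {
    Flush();
    return std::string(file_.begin(), file_.end());
  }

 private:
  std::vector<char> file_;         // Stand-in for the on-disk stream.
  std::vector<char> buffer_;       // Write-back buffer, one contiguous window.
  std::size_t buf_offset_ = 0;     // File offset where the window starts.
};

int main() {
  BufferedStream s;
  s.Write(0, "hello ");  // Starts the buffered window at offset 0.
  s.Write(6, "world");   // Sequential append: absorbed by the buffer.
  s.Write(100, "far");   // Gap: flush first, then reuse the buffer at 100.
  std::printf("%zu bytes total\n", s.FileContents().size());
  return 0;
}
```

Sequential appends stay in memory, matching the write-from-the-beginning fast path the comment says the code optimizes for; only a non-contiguous write pays for a flush.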