| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/blockfile/entry_impl.h" | 5 #include "net/disk_cache/blockfile/entry_impl.h" |
| 6 | 6 |
| 7 #include <limits> |
| 8 |
| 7 #include "base/hash.h" | 9 #include "base/hash.h" |
| 8 #include "base/message_loop/message_loop.h" | 10 #include "base/message_loop/message_loop.h" |
| 9 #include "base/strings/string_util.h" | 11 #include "base/strings/string_util.h" |
| 10 #include "net/base/io_buffer.h" | 12 #include "net/base/io_buffer.h" |
| 11 #include "net/base/net_errors.h" | 13 #include "net/base/net_errors.h" |
| 12 #include "net/disk_cache/blockfile/backend_impl.h" | 14 #include "net/disk_cache/blockfile/backend_impl.h" |
| 13 #include "net/disk_cache/blockfile/bitmap.h" | 15 #include "net/disk_cache/blockfile/bitmap.h" |
| 14 #include "net/disk_cache/blockfile/disk_format.h" | 16 #include "net/disk_cache/blockfile/disk_format.h" |
| 15 #include "net/disk_cache/blockfile/histogram_macros.h" | 17 #include "net/disk_cache/blockfile/histogram_macros.h" |
| 16 #include "net/disk_cache/blockfile/sparse_control.h" | 18 #include "net/disk_cache/blockfile/sparse_control.h" |
| (...skipping 326 matching lines...) |
| 343 truncate); | 345 truncate); |
| 344 | 346 |
| 345 if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) { | 347 if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) { |
| 346 net_log_.EndEvent( | 348 net_log_.EndEvent( |
| 347 net::NetLog::TYPE_ENTRY_WRITE_DATA, | 349 net::NetLog::TYPE_ENTRY_WRITE_DATA, |
| 348 CreateNetLogReadWriteCompleteCallback(result)); | 350 CreateNetLogReadWriteCompleteCallback(result)); |
| 349 } | 351 } |
| 350 return result; | 352 return result; |
| 351 } | 353 } |
| 352 | 354 |
| 353 int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len, | 355 int EntryImpl::ReadSparseDataImpl(int64_t offset, |
| 356 IOBuffer* buf, |
| 357 int buf_len, |
| 354 const CompletionCallback& callback) { | 358 const CompletionCallback& callback) { |
| 355 DCHECK(node_.Data()->dirty || read_only_); | 359 DCHECK(node_.Data()->dirty || read_only_); |
| 356 int result = InitSparseData(); | 360 int result = InitSparseData(); |
| 357 if (net::OK != result) | 361 if (net::OK != result) |
| 358 return result; | 362 return result; |
| 359 | 363 |
| 360 TimeTicks start = TimeTicks::Now(); | 364 TimeTicks start = TimeTicks::Now(); |
| 361 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, | 365 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, |
| 362 callback); | 366 callback); |
| 363 ReportIOTime(kSparseRead, start); | 367 ReportIOTime(kSparseRead, start); |
| 364 return result; | 368 return result; |
| 365 } | 369 } |
| 366 | 370 |
| 367 int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len, | 371 int EntryImpl::WriteSparseDataImpl(int64_t offset, |
| 372 IOBuffer* buf, |
| 373 int buf_len, |
| 368 const CompletionCallback& callback) { | 374 const CompletionCallback& callback) { |
| 369 DCHECK(node_.Data()->dirty || read_only_); | 375 DCHECK(node_.Data()->dirty || read_only_); |
| 370 int result = InitSparseData(); | 376 int result = InitSparseData(); |
| 371 if (net::OK != result) | 377 if (net::OK != result) |
| 372 return result; | 378 return result; |
| 373 | 379 |
| 374 TimeTicks start = TimeTicks::Now(); | 380 TimeTicks start = TimeTicks::Now(); |
| 375 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, | 381 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, |
| 376 buf_len, callback); | 382 buf_len, callback); |
| 377 ReportIOTime(kSparseWrite, start); | 383 ReportIOTime(kSparseWrite, start); |
| 378 return result; | 384 return result; |
| 379 } | 385 } |
| 380 | 386 |
| 381 int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) { | 387 int EntryImpl::GetAvailableRangeImpl(int64_t offset, int len, int64_t* start) { |
| 382 int result = InitSparseData(); | 388 int result = InitSparseData(); |
| 383 if (net::OK != result) | 389 if (net::OK != result) |
| 384 return result; | 390 return result; |
| 385 | 391 |
| 386 return sparse_->GetAvailableRange(offset, len, start); | 392 return sparse_->GetAvailableRange(offset, len, start); |
| 387 } | 393 } |
| 388 | 394 |
| 389 void EntryImpl::CancelSparseIOImpl() { | 395 void EntryImpl::CancelSparseIOImpl() { |
| 390 if (!sparse_.get()) | 396 if (!sparse_.get()) |
| 391 return; | 397 return; |
| 392 | 398 |
| 393 sparse_->CancelIO(); | 399 sparse_->CancelIO(); |
| 394 } | 400 } |
| 395 | 401 |
| 396 int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) { | 402 int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) { |
| 397 DCHECK(sparse_.get()); | 403 DCHECK(sparse_.get()); |
| 398 return sparse_->ReadyToUse(callback); | 404 return sparse_->ReadyToUse(callback); |
| 399 } | 405 } |
| 400 | 406 |
| 401 uint32 EntryImpl::GetHash() { | 407 uint32_t EntryImpl::GetHash() { |
| 402 return entry_.Data()->hash; | 408 return entry_.Data()->hash; |
| 403 } | 409 } |
| 404 | 410 |
| 405 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, | 411 bool EntryImpl::CreateEntry(Addr node_address, |
| 406 uint32 hash) { | 412 const std::string& key, |
| 413 uint32_t hash) { |
| 407 Trace("Create entry In"); | 414 Trace("Create entry In"); |
| 408 EntryStore* entry_store = entry_.Data(); | 415 EntryStore* entry_store = entry_.Data(); |
| 409 RankingsNode* node = node_.Data(); | 416 RankingsNode* node = node_.Data(); |
| 410 memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks()); | 417 memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks()); |
| 411 memset(node, 0, sizeof(RankingsNode)); | 418 memset(node, 0, sizeof(RankingsNode)); |
| 412 if (!node_.LazyInit(backend_->File(node_address), node_address)) | 419 if (!node_.LazyInit(backend_->File(node_address), node_address)) |
| 413 return false; | 420 return false; |
| 414 | 421 |
| 415 entry_store->rankings_node = node_address.value(); | 422 entry_store->rankings_node = node_address.value(); |
| 416 node->contents = entry_.address().value(); | 423 node->contents = entry_.address().value(); |
| 417 | 424 |
| 418 entry_store->hash = hash; | 425 entry_store->hash = hash; |
| 419 entry_store->creation_time = Time::Now().ToInternalValue(); | 426 entry_store->creation_time = Time::Now().ToInternalValue(); |
| 420 entry_store->key_len = static_cast<int32>(key.size()); | 427 entry_store->key_len = static_cast<int32_t>(key.size()); |
| 421 if (entry_store->key_len > kMaxInternalKeyLength) { | 428 if (entry_store->key_len > kMaxInternalKeyLength) { |
| 422 Addr address(0); | 429 Addr address(0); |
| 423 if (!CreateBlock(entry_store->key_len + 1, &address)) | 430 if (!CreateBlock(entry_store->key_len + 1, &address)) |
| 424 return false; | 431 return false; |
| 425 | 432 |
| 426 entry_store->long_key = address.value(); | 433 entry_store->long_key = address.value(); |
| 427 File* key_file = GetBackingFile(address, kKeyFileIndex); | 434 File* key_file = GetBackingFile(address, kKeyFileIndex); |
| 428 key_ = key; | 435 key_ = key; |
| 429 | 436 |
| 430 size_t offset = 0; | 437 size_t offset = 0; |
| 431 if (address.is_block_file()) | 438 if (address.is_block_file()) |
| 432 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; | 439 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize; |
| 433 | 440 |
| 434 if (!key_file || !key_file->Write(key.data(), key.size(), offset)) { | 441 if (!key_file || !key_file->Write(key.data(), key.size(), offset)) { |
| 435 DeleteData(address, kKeyFileIndex); | 442 DeleteData(address, kKeyFileIndex); |
| 436 return false; | 443 return false; |
| 437 } | 444 } |
| 438 | 445 |
| 439 if (address.is_separate_file()) | 446 if (address.is_separate_file()) |
| 440 key_file->SetLength(key.size() + 1); | 447 key_file->SetLength(key.size() + 1); |
| 441 } else { | 448 } else { |
| 442 memcpy(entry_store->key, key.data(), key.size()); | 449 memcpy(entry_store->key, key.data(), key.size()); |
| 443 entry_store->key[key.size()] = '\0'; | 450 entry_store->key[key.size()] = '\0'; |
| 444 } | 451 } |
| 445 backend_->ModifyStorageSize(0, static_cast<int32>(key.size())); | 452 backend_->ModifyStorageSize(0, static_cast<int32_t>(key.size())); |
| 446 CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size())); | 453 CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32_t>(key.size())); |
| 447 node->dirty = backend_->GetCurrentEntryId(); | 454 node->dirty = backend_->GetCurrentEntryId(); |
| 448 Log("Create Entry "); | 455 Log("Create Entry "); |
| 449 return true; | 456 return true; |
| 450 } | 457 } |
| 451 | 458 |
| 452 bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) { | 459 bool EntryImpl::IsSameEntry(const std::string& key, uint32_t hash) { |
| 453 if (entry_.Data()->hash != hash || | 460 if (entry_.Data()->hash != hash || |
| 454 static_cast<size_t>(entry_.Data()->key_len) != key.size()) | 461 static_cast<size_t>(entry_.Data()->key_len) != key.size()) |
| 455 return false; | 462 return false; |
| 456 | 463 |
| 457 return (key.compare(GetKey()) == 0); | 464 return (key.compare(GetKey()) == 0); |
| 458 } | 465 } |
| 459 | 466 |
| 460 void EntryImpl::InternalDoom() { | 467 void EntryImpl::InternalDoom() { |
| 461 net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM); | 468 net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM); |
| 462 DCHECK(node_.HasData()); | 469 DCHECK(node_.HasData()); |
| (...skipping 76 matching lines...) |
| 539 | 546 |
| 540 RankingsNode* rankings = node_.Data(); | 547 RankingsNode* rankings = node_.Data(); |
| 541 if (!rankings->dirty) { | 548 if (!rankings->dirty) { |
| 542 rankings->dirty = backend_->GetCurrentEntryId(); | 549 rankings->dirty = backend_->GetCurrentEntryId(); |
| 543 if (!node_.Store()) | 550 if (!node_.Store()) |
| 544 return false; | 551 return false; |
| 545 } | 552 } |
| 546 return true; | 553 return true; |
| 547 } | 554 } |
| 548 | 555 |
| 549 void EntryImpl::SetDirtyFlag(int32 current_id) { | 556 void EntryImpl::SetDirtyFlag(int32_t current_id) { |
| 550 DCHECK(node_.HasData()); | 557 DCHECK(node_.HasData()); |
| 551 if (node_.Data()->dirty && current_id != node_.Data()->dirty) | 558 if (node_.Data()->dirty && current_id != node_.Data()->dirty) |
| 552 dirty_ = true; | 559 dirty_ = true; |
| 553 | 560 |
| 554 if (!current_id) | 561 if (!current_id) |
| 555 dirty_ = true; | 562 dirty_ = true; |
| 556 } | 563 } |
| 557 | 564 |
| 558 void EntryImpl::SetPointerForInvalidEntry(int32 new_id) { | 565 void EntryImpl::SetPointerForInvalidEntry(int32_t new_id) { |
| 559 node_.Data()->dirty = new_id; | 566 node_.Data()->dirty = new_id; |
| 560 node_.Store(); | 567 node_.Store(); |
| 561 } | 568 } |
| 562 | 569 |
| 563 bool EntryImpl::LeaveRankingsBehind() { | 570 bool EntryImpl::LeaveRankingsBehind() { |
| 564 return !node_.Data()->contents; | 571 return !node_.Data()->contents; |
| 565 } | 572 } |
| 566 | 573 |
| 567 // This only includes checks that relate to the first block of the entry (the | 574 // This only includes checks that relate to the first block of the entry (the |
| 568 // first 256 bytes), and values that should be set from the entry creation. | 575 // first 256 bytes), and values that should be set from the entry creation. |
| (...skipping 224 matching lines...) |
| 793 Time EntryImpl::GetLastUsed() const { | 800 Time EntryImpl::GetLastUsed() const { |
| 794 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); | 801 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); |
| 795 return Time::FromInternalValue(node->Data()->last_used); | 802 return Time::FromInternalValue(node->Data()->last_used); |
| 796 } | 803 } |
| 797 | 804 |
| 798 Time EntryImpl::GetLastModified() const { | 805 Time EntryImpl::GetLastModified() const { |
| 799 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); | 806 CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_); |
| 800 return Time::FromInternalValue(node->Data()->last_modified); | 807 return Time::FromInternalValue(node->Data()->last_modified); |
| 801 } | 808 } |
| 802 | 809 |
| 803 int32 EntryImpl::GetDataSize(int index) const { | 810 int32_t EntryImpl::GetDataSize(int index) const { |
| 804 if (index < 0 || index >= kNumStreams) | 811 if (index < 0 || index >= kNumStreams) |
| 805 return 0; | 812 return 0; |
| 806 | 813 |
| 807 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 814 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 808 return entry->Data()->data_size[index]; | 815 return entry->Data()->data_size[index]; |
| 809 } | 816 } |
| 810 | 817 |
| 811 int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len, | 818 int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len, |
| 812 const CompletionCallback& callback) { | 819 const CompletionCallback& callback) { |
| 813 if (callback.is_null()) | 820 if (callback.is_null()) |
| (...skipping 30 matching lines...) |
| 844 return net::ERR_INVALID_ARGUMENT; | 851 return net::ERR_INVALID_ARGUMENT; |
| 845 | 852 |
| 846 if (!background_queue_.get()) | 853 if (!background_queue_.get()) |
| 847 return net::ERR_UNEXPECTED; | 854 return net::ERR_UNEXPECTED; |
| 848 | 855 |
| 849 background_queue_->WriteData(this, index, offset, buf, buf_len, truncate, | 856 background_queue_->WriteData(this, index, offset, buf, buf_len, truncate, |
| 850 callback); | 857 callback); |
| 851 return net::ERR_IO_PENDING; | 858 return net::ERR_IO_PENDING; |
| 852 } | 859 } |
| 853 | 860 |
| 854 int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len, | 861 int EntryImpl::ReadSparseData(int64_t offset, |
| 862 IOBuffer* buf, |
| 863 int buf_len, |
| 855 const CompletionCallback& callback) { | 864 const CompletionCallback& callback) { |
| 856 if (callback.is_null()) | 865 if (callback.is_null()) |
| 857 return ReadSparseDataImpl(offset, buf, buf_len, callback); | 866 return ReadSparseDataImpl(offset, buf, buf_len, callback); |
| 858 | 867 |
| 859 if (!background_queue_.get()) | 868 if (!background_queue_.get()) |
| 860 return net::ERR_UNEXPECTED; | 869 return net::ERR_UNEXPECTED; |
| 861 | 870 |
| 862 background_queue_->ReadSparseData(this, offset, buf, buf_len, callback); | 871 background_queue_->ReadSparseData(this, offset, buf, buf_len, callback); |
| 863 return net::ERR_IO_PENDING; | 872 return net::ERR_IO_PENDING; |
| 864 } | 873 } |
| 865 | 874 |
| 866 int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len, | 875 int EntryImpl::WriteSparseData(int64_t offset, |
| 876 IOBuffer* buf, |
| 877 int buf_len, |
| 867 const CompletionCallback& callback) { | 878 const CompletionCallback& callback) { |
| 868 if (callback.is_null()) | 879 if (callback.is_null()) |
| 869 return WriteSparseDataImpl(offset, buf, buf_len, callback); | 880 return WriteSparseDataImpl(offset, buf, buf_len, callback); |
| 870 | 881 |
| 871 if (!background_queue_.get()) | 882 if (!background_queue_.get()) |
| 872 return net::ERR_UNEXPECTED; | 883 return net::ERR_UNEXPECTED; |
| 873 | 884 |
| 874 background_queue_->WriteSparseData(this, offset, buf, buf_len, callback); | 885 background_queue_->WriteSparseData(this, offset, buf, buf_len, callback); |
| 875 return net::ERR_IO_PENDING; | 886 return net::ERR_IO_PENDING; |
| 876 } | 887 } |
| 877 | 888 |
| 878 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, | 889 int EntryImpl::GetAvailableRange(int64_t offset, |
| 890 int len, |
| 891 int64_t* start, |
| 879 const CompletionCallback& callback) { | 892 const CompletionCallback& callback) { |
| 880 if (!background_queue_.get()) | 893 if (!background_queue_.get()) |
| 881 return net::ERR_UNEXPECTED; | 894 return net::ERR_UNEXPECTED; |
| 882 | 895 |
| 883 background_queue_->GetAvailableRange(this, offset, len, start, callback); | 896 background_queue_->GetAvailableRange(this, offset, len, start, callback); |
| 884 return net::ERR_IO_PENDING; | 897 return net::ERR_IO_PENDING; |
| 885 } | 898 } |
| 886 | 899 |
| 887 bool EntryImpl::CouldBeSparse() const { | 900 bool EntryImpl::CouldBeSparse() const { |
| 888 if (sparse_.get()) | 901 if (sparse_.get()) |
| (...skipping 181 matching lines...) |
| 1070 if (!backend_.get()) | 1083 if (!backend_.get()) |
| 1071 return net::ERR_UNEXPECTED; | 1084 return net::ERR_UNEXPECTED; |
| 1072 | 1085 |
| 1073 int max_file_size = backend_->MaxFileSize(); | 1086 int max_file_size = backend_->MaxFileSize(); |
| 1074 | 1087 |
| 1075 // offset or buf_len could be negative numbers. | 1088 // offset or buf_len could be negative numbers. |
| 1076 if (offset > max_file_size || buf_len > max_file_size || | 1089 if (offset > max_file_size || buf_len > max_file_size || |
| 1077 offset + buf_len > max_file_size) { | 1090 offset + buf_len > max_file_size) { |
| 1078 int size = offset + buf_len; | 1091 int size = offset + buf_len; |
| 1079 if (size <= max_file_size) | 1092 if (size <= max_file_size) |
| 1080 size = kint32max; | 1093 size = std::numeric_limits<int32_t>::max(); |
| 1081 backend_->TooMuchStorageRequested(size); | 1094 backend_->TooMuchStorageRequested(size); |
| 1082 return net::ERR_FAILED; | 1095 return net::ERR_FAILED; |
| 1083 } | 1096 } |
| 1084 | 1097 |
| 1085 TimeTicks start = TimeTicks::Now(); | 1098 TimeTicks start = TimeTicks::Now(); |
| 1086 | 1099 |
| 1087 // Read the size at this point (it may change inside prepare). | 1100 // Read the size at this point (it may change inside prepare). |
| 1088 int entry_size = entry_.Data()->data_size[index]; | 1101 int entry_size = entry_.Data()->data_size[index]; |
| 1089 bool extending = entry_size < offset + buf_len; | 1102 bool extending = entry_size < offset + buf_len; |
| 1090 truncate = truncate && entry_size > offset + buf_len; | 1103 truncate = truncate && entry_size > offset + buf_len; |
| (...skipping 401 matching lines...) |
| 1492 | 1505 |
| 1493 // Use a local variable so that sparse_ never goes from 'valid' to NULL. | 1506 // Use a local variable so that sparse_ never goes from 'valid' to NULL. |
| 1494 scoped_ptr<SparseControl> sparse(new SparseControl(this)); | 1507 scoped_ptr<SparseControl> sparse(new SparseControl(this)); |
| 1495 int result = sparse->Init(); | 1508 int result = sparse->Init(); |
| 1496 if (net::OK == result) | 1509 if (net::OK == result) |
| 1497 sparse_.swap(sparse); | 1510 sparse_.swap(sparse); |
| 1498 | 1511 |
| 1499 return result; | 1512 return result; |
| 1500 } | 1513 } |
| 1501 | 1514 |
| 1502 void EntryImpl::SetEntryFlags(uint32 flags) { | 1515 void EntryImpl::SetEntryFlags(uint32_t flags) { |
| 1503 entry_.Data()->flags |= flags; | 1516 entry_.Data()->flags |= flags; |
| 1504 entry_.set_modified(); | 1517 entry_.set_modified(); |
| 1505 } | 1518 } |
| 1506 | 1519 |
| 1507 uint32 EntryImpl::GetEntryFlags() { | 1520 uint32_t EntryImpl::GetEntryFlags() { |
| 1508 return entry_.Data()->flags; | 1521 return entry_.Data()->flags; |
| 1509 } | 1522 } |
| 1510 | 1523 |
| 1511 void EntryImpl::GetData(int index, char** buffer, Addr* address) { | 1524 void EntryImpl::GetData(int index, char** buffer, Addr* address) { |
| 1512 DCHECK(backend_.get()); | 1525 DCHECK(backend_.get()); |
| 1513 if (user_buffers_[index].get() && user_buffers_[index]->Size() && | 1526 if (user_buffers_[index].get() && user_buffers_[index]->Size() && |
| 1514 !user_buffers_[index]->Start()) { | 1527 !user_buffers_[index]->Start()) { |
| 1515 // The data is already in memory, just copy it and we're done. | 1528 // The data is already in memory, just copy it and we're done. |
| 1516 int data_len = entry_.Data()->data_size[index]; | 1529 int data_len = entry_.Data()->data_size[index]; |
| 1517 if (data_len <= user_buffers_[index]->Size()) { | 1530 if (data_len <= user_buffers_[index]->Size()) { |
| (...skipping 26 matching lines...) |
| 1544 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), | 1557 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), |
| 1545 entry_.address().value(), node_.address().value()); | 1558 entry_.address().value(), node_.address().value()); |
| 1546 | 1559 |
| 1547 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], | 1560 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], |
| 1548 entry_.Data()->data_addr[1], entry_.Data()->long_key); | 1561 entry_.Data()->data_addr[1], entry_.Data()->long_key); |
| 1549 | 1562 |
| 1550 Trace(" doomed: %d 0x%x", doomed_, dirty); | 1563 Trace(" doomed: %d 0x%x", doomed_, dirty); |
| 1551 } | 1564 } |
| 1552 | 1565 |
| 1553 } // namespace disk_cache | 1566 } // namespace disk_cache |
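
For context, the mechanical pattern applied throughout this diff is the migration from Chromium's legacy integer typedefs (int64, uint32, int32) to the fixed-width <stdint.h> types, and the replacement of the legacy kint32max constant with std::numeric_limits<int32_t>::max(), which is why the new version adds #include <limits>. The snippet below is a standalone illustration of that pattern only; it is not code from entry_impl.cc, and the function and variable names are invented for the example.

```cpp
#include <cstdint>  // int32_t, int64_t, uint32_t
#include <limits>   // std::numeric_limits

// Illustrative only: old-style typedefs (shown in comments) next to the
// fixed-width replacements used by this change.
void TypeMigrationExample() {
  int64_t sparse_offset = 0;  // was: int64
  uint32_t entry_hash = 0;    // was: uint32
  int32_t key_len = 0;        // was: int32

  // was: key_len = kint32max;
  key_len = std::numeric_limits<int32_t>::max();

  (void)sparse_offset;
  (void)entry_hash;
  (void)key_len;
}
```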