| OLD | NEW |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/simple/simple_index.h" | 5 #include "net/disk_cache/simple/simple_index.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <limits> | 8 #include <limits> |
| 9 #include <string> | 9 #include <string> |
| 10 #include <utility> | 10 #include <utility> |
| (...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 56 // estimate of the filesystem overhead, but it also serves to flatten | 56 // estimate of the filesystem overhead, but it also serves to flatten |
| 57 // the curve so that 1-byte entries and 2-byte entries are basically | 57 // the curve so that 1-byte entries and 2-byte entries are basically |
| 58 // treated the same. | 58 // treated the same. |
| 59 static const int kEstimatedEntryOverhead = 512; | 59 static const int kEstimatedEntryOverhead = 512; |
| 60 | 60 |
| 61 } // namespace | 61 } // namespace |
| 62 | 62 |
| 63 namespace disk_cache { | 63 namespace disk_cache { |
| 64 | 64 |
| 65 EntryMetadata::EntryMetadata() | 65 EntryMetadata::EntryMetadata() |
| 66 : last_used_time_seconds_since_epoch_(0), | 66 : last_used_time_seconds_since_epoch_(0), |
| 67 entry_size_(0) { | 67 entry_size_256b_chunks_(0), |
| 68 } | 68 in_memory_data_(0) {} |
| 69 | 69 |
| 70 EntryMetadata::EntryMetadata(base::Time last_used_time, | 70 EntryMetadata::EntryMetadata(base::Time last_used_time, |
| 71 base::StrictNumeric<uint32_t> entry_size) | 71 base::StrictNumeric<uint32_t> entry_size) |
| 72 : last_used_time_seconds_since_epoch_(0), entry_size_(entry_size) { | 72 : last_used_time_seconds_since_epoch_(0), |
| 73 entry_size_256b_chunks_(0), |
| 74 in_memory_data_(0) { |
| 75 SetEntrySize(entry_size); // to round/pack properly. |
| 73 SetLastUsedTime(last_used_time); | 76 SetLastUsedTime(last_used_time); |
| 74 } | 77 } |
| 75 | 78 |
| 76 base::Time EntryMetadata::GetLastUsedTime() const { | 79 base::Time EntryMetadata::GetLastUsedTime() const { |
| 77 // Preserve nullity. | 80 // Preserve nullity. |
| 78 if (last_used_time_seconds_since_epoch_ == 0) | 81 if (last_used_time_seconds_since_epoch_ == 0) |
| 79 return base::Time(); | 82 return base::Time(); |
| 80 | 83 |
| 81 return base::Time::UnixEpoch() + | 84 return base::Time::UnixEpoch() + |
| 82 base::TimeDelta::FromSeconds(last_used_time_seconds_since_epoch_); | 85 base::TimeDelta::FromSeconds(last_used_time_seconds_since_epoch_); |
| 83 } | 86 } |
| 84 | 87 |
| 85 void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) { | 88 void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) { |
| 86 // Preserve nullity. | 89 // Preserve nullity. |
| 87 if (last_used_time.is_null()) { | 90 if (last_used_time.is_null()) { |
| 88 last_used_time_seconds_since_epoch_ = 0; | 91 last_used_time_seconds_since_epoch_ = 0; |
| 89 return; | 92 return; |
| 90 } | 93 } |
| 91 | 94 |
| 92 last_used_time_seconds_since_epoch_ = base::saturated_cast<uint32_t>( | 95 last_used_time_seconds_since_epoch_ = base::saturated_cast<uint32_t>( |
| 93 (last_used_time - base::Time::UnixEpoch()).InSeconds()); | 96 (last_used_time - base::Time::UnixEpoch()).InSeconds()); |
| 94 // Avoid accidental nullity. | 97 // Avoid accidental nullity. |
| 95 if (last_used_time_seconds_since_epoch_ == 0) | 98 if (last_used_time_seconds_since_epoch_ == 0) |
| 96 last_used_time_seconds_since_epoch_ = 1; | 99 last_used_time_seconds_since_epoch_ = 1; |
| 97 } | 100 } |
| 98 | 101 |
| 99 uint32_t EntryMetadata::GetEntrySize() const { | 102 uint32_t EntryMetadata::GetEntrySize() const { |
| 100 return entry_size_; | 103 return entry_size_256b_chunks_ << 8; |
| 101 } | 104 } |
| 102 | 105 |
| 103 void EntryMetadata::SetEntrySize(base::StrictNumeric<uint32_t> entry_size) { | 106 void EntryMetadata::SetEntrySize(base::StrictNumeric<uint32_t> entry_size) { |
| 104 entry_size_ = entry_size; | 107 // This should not overflow since we limit entries to 1/8th of the cache. |
| 108 entry_size_256b_chunks_ = (static_cast<uint32_t>(entry_size) + 255) >> 8; |
| 105 } | 109 } |
| 106 | 110 |
| 107 void EntryMetadata::Serialize(base::Pickle* pickle) const { | 111 void EntryMetadata::Serialize(base::Pickle* pickle) const { |
| 108 DCHECK(pickle); | 112 DCHECK(pickle); |
| 109 int64_t internal_last_used_time = GetLastUsedTime().ToInternalValue(); | 113 int64_t internal_last_used_time = GetLastUsedTime().ToInternalValue(); |
| 110 // If you modify the size of the pickle, be sure to update | 114 // If you modify the size of the pickle, be sure to update |
| 111 // kOnDiskSizeBytes. | 115 // kOnDiskSizeBytes. |
| 116 uint32_t packed_entry_info = (entry_size_256b_chunks_ << 8) | in_memory_data_; |
| 112 pickle->WriteInt64(internal_last_used_time); | 117 pickle->WriteInt64(internal_last_used_time); |
| 113 pickle->WriteUInt64(entry_size_); | 118 pickle->WriteUInt64(packed_entry_info); |
| 114 } | 119 } |
| 115 | 120 |
| 116 bool EntryMetadata::Deserialize(base::PickleIterator* it) { | 121 bool EntryMetadata::Deserialize(base::PickleIterator* it, |
| 122 bool has_entry_in_memory_data) { |
| 117 DCHECK(it); | 123 DCHECK(it); |
| 118 int64_t tmp_last_used_time; | 124 int64_t tmp_last_used_time; |
| 119 uint64_t tmp_entry_size; | 125 uint64_t tmp_entry_size; |
| 120 if (!it->ReadInt64(&tmp_last_used_time) || !it->ReadUInt64(&tmp_entry_size) || | 126 if (!it->ReadInt64(&tmp_last_used_time) || !it->ReadUInt64(&tmp_entry_size) || |
| 121 tmp_entry_size > std::numeric_limits<decltype(entry_size_)>::max()) | 127 tmp_entry_size > std::numeric_limits<uint32_t>::max()) |
| 122 return false; | 128 return false; |
| 123 SetLastUsedTime(base::Time::FromInternalValue(tmp_last_used_time)); | 129 SetLastUsedTime(base::Time::FromInternalValue(tmp_last_used_time)); |
| 124 entry_size_ = static_cast<uint32_t>(tmp_entry_size); | 130 if (has_entry_in_memory_data) { |
| 131 // tmp_entry_size actually packs entry_size_256b_chunks_ and |
| 132 // in_memory_data_. |
| 133 SetEntrySize(static_cast<uint32_t>(tmp_entry_size & 0xFFFFFF00)); |
| 134 SetInMemoryData(static_cast<uint8_t>(tmp_entry_size & 0xFF)); |
| 135 } else { |
| 136 SetEntrySize(static_cast<uint32_t>(tmp_entry_size)); |
| 137 SetInMemoryData(0); |
| 138 } |
| 125 return true; | 139 return true; |
| 126 } | 140 } |
| 127 | 141 |
| 128 SimpleIndex::SimpleIndex( | 142 SimpleIndex::SimpleIndex( |
| 129 const scoped_refptr<base::SingleThreadTaskRunner>& io_thread, | 143 const scoped_refptr<base::SingleThreadTaskRunner>& io_thread, |
| 130 scoped_refptr<BackendCleanupTracker> cleanup_tracker, | 144 scoped_refptr<BackendCleanupTracker> cleanup_tracker, |
| 131 SimpleIndexDelegate* delegate, | 145 SimpleIndexDelegate* delegate, |
| 132 net::CacheType cache_type, | 146 net::CacheType cache_type, |
| 133 std::unique_ptr<SimpleIndexFile> index_file) | 147 std::unique_ptr<SimpleIndexFile> index_file) |
| 134 : cleanup_tracker_(std::move(cleanup_tracker)), | 148 : cleanup_tracker_(std::move(cleanup_tracker)), |
| (...skipping 150 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 285 removed_entries_.insert(entry_hash); | 299 removed_entries_.insert(entry_hash); |
| 286 PostponeWritingToDisk(); | 300 PostponeWritingToDisk(); |
| 287 } | 301 } |
| 288 | 302 |
| 289 bool SimpleIndex::Has(uint64_t hash) const { | 303 bool SimpleIndex::Has(uint64_t hash) const { |
| 290 DCHECK(io_thread_checker_.CalledOnValidThread()); | 304 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 291 // If not initialized, always return true, forcing it to go to the disk. | 305 // If not initialized, always return true, forcing it to go to the disk. |
| 292 return !initialized_ || entries_set_.count(hash) > 0; | 306 return !initialized_ || entries_set_.count(hash) > 0; |
| 293 } | 307 } |
| 294 | 308 |
| 309 uint8_t SimpleIndex::GetEntryInMemoryData(uint64_t entry_hash) const { |
| 310 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 311 EntrySet::const_iterator it = entries_set_.find(entry_hash); |
| 312 if (it == entries_set_.end()) |
| 313 return 0; |
| 314 return it->second.GetInMemoryData(); |
| 315 } |
| 316 |
| 317 void SimpleIndex::SetEntryInMemoryData(uint64_t entry_hash, uint8_t value) { |
| 318 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 319 EntrySet::iterator it = entries_set_.find(entry_hash); |
| 320 if (it == entries_set_.end()) |
| 321 return; |
| 322 return it->second.SetInMemoryData(value); |
| 323 } |
| 324 |
| 295 bool SimpleIndex::UseIfExists(uint64_t entry_hash) { | 325 bool SimpleIndex::UseIfExists(uint64_t entry_hash) { |
| 296 DCHECK(io_thread_checker_.CalledOnValidThread()); | 326 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 297 // Always update the last used time, even if it is during initialization. | 327 // Always update the last used time, even if it is during initialization. |
| 298 // It will be merged later. | 328 // It will be merged later. |
| 299 EntrySet::iterator it = entries_set_.find(entry_hash); | 329 EntrySet::iterator it = entries_set_.find(entry_hash); |
| 300 if (it == entries_set_.end()) | 330 if (it == entries_set_.end()) |
| 301 // If not initialized, always return true, forcing it to go to the disk. | 331 // If not initialized, always return true, forcing it to go to the disk. |
| 302 return !initialized_; | 332 return !initialized_; |
| 303 it->second.SetLastUsedTime(base::Time::Now()); | 333 it->second.SetLastUsedTime(base::Time::Now()); |
| 304 PostponeWritingToDisk(); | 334 PostponeWritingToDisk(); |
| (...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 420 FROM_HERE, base::TimeDelta::FromMilliseconds(delay), write_to_disk_cb_); | 450 FROM_HERE, base::TimeDelta::FromMilliseconds(delay), write_to_disk_cb_); |
| 421 } | 451 } |
| 422 | 452 |
| 423 void SimpleIndex::UpdateEntryIteratorSize( | 453 void SimpleIndex::UpdateEntryIteratorSize( |
| 424 EntrySet::iterator* it, | 454 EntrySet::iterator* it, |
| 425 base::StrictNumeric<uint32_t> entry_size) { | 455 base::StrictNumeric<uint32_t> entry_size) { |
| 426 // Update the total cache size with the new entry size. | 456 // Update the total cache size with the new entry size. |
| 427 DCHECK(io_thread_checker_.CalledOnValidThread()); | 457 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 428 DCHECK_GE(cache_size_, (*it)->second.GetEntrySize()); | 458 DCHECK_GE(cache_size_, (*it)->second.GetEntrySize()); |
| 429 cache_size_ -= (*it)->second.GetEntrySize(); | 459 cache_size_ -= (*it)->second.GetEntrySize(); |
| 430 cache_size_ += static_cast<uint32_t>(entry_size); | |
| 431 (*it)->second.SetEntrySize(entry_size); | 460 (*it)->second.SetEntrySize(entry_size); |
| 461 // We use GetEntrySize to get consistent rounding. |
| 462 cache_size_ += (*it)->second.GetEntrySize(); |
| 432 } | 463 } |
| 433 | 464 |
| 434 void SimpleIndex::MergeInitializingSet( | 465 void SimpleIndex::MergeInitializingSet( |
| 435 std::unique_ptr<SimpleIndexLoadResult> load_result) { | 466 std::unique_ptr<SimpleIndexLoadResult> load_result) { |
| 436 DCHECK(io_thread_checker_.CalledOnValidThread()); | 467 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 437 | 468 |
| 438 EntrySet* index_file_entries = &load_result->entries; | 469 EntrySet* index_file_entries = &load_result->entries; |
| 439 | 470 |
| 440 for (std::unordered_set<uint64_t>::const_iterator it = | 471 for (std::unordered_set<uint64_t>::const_iterator it = |
| 441 removed_entries_.begin(); | 472 removed_entries_.begin(); |
| (...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 538 // written back. | 569 // written back. |
| 539 after_write = base::Bind([](scoped_refptr<BackendCleanupTracker>) {}, | 570 after_write = base::Bind([](scoped_refptr<BackendCleanupTracker>) {}, |
| 540 cleanup_tracker_); | 571 cleanup_tracker_); |
| 541 } | 572 } |
| 542 | 573 |
| 543 index_file_->WriteToDisk(reason, entries_set_, cache_size_, start, | 574 index_file_->WriteToDisk(reason, entries_set_, cache_size_, start, |
| 544 app_on_background_, after_write); | 575 app_on_background_, after_write); |
| 545 } | 576 } |
| 546 | 577 |
| 547 } // namespace disk_cache | 578 } // namespace disk_cache |
| OLD | NEW |