Chromium Code Reviews

Unified Diff: net/disk_cache/simple/simple_index.cc

Issue 2922973003: RFC: use some in-memory state in SimpleCache to quickly cache-miss some CantConditionalize cases
Patch Set: Somewhat better take at the higher-level HC::T impl, a bit less hacky, and actually writes to the cache now. Created 3 years, 6 months ago
 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "net/disk_cache/simple/simple_index.h"
 
 #include <algorithm>
 #include <limits>
 #include <string>
 #include <utility>
(... skipping 42 matching lines ...)
 
 namespace disk_cache {
 
 EntryMetadata::EntryMetadata()
     : last_used_time_seconds_since_epoch_(0),
       entry_size_(0) {
 }
 
 EntryMetadata::EntryMetadata(base::Time last_used_time,
                              base::StrictNumeric<uint32_t> entry_size)
-    : last_used_time_seconds_since_epoch_(0), entry_size_(entry_size) {
+    : last_used_time_seconds_since_epoch_(0), entry_size_(0) {
+  SetEntrySize(entry_size);  // to round/pack properly.
   SetLastUsedTime(last_used_time);
 }
 
 base::Time EntryMetadata::GetLastUsedTime() const {
   // Preserve nullity.
   if (last_used_time_seconds_since_epoch_ == 0)
     return base::Time();
 
   return base::Time::UnixEpoch() +
          base::TimeDelta::FromSeconds(last_used_time_seconds_since_epoch_);
 }
 
 void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) {
   // Preserve nullity.
   if (last_used_time.is_null()) {
     last_used_time_seconds_since_epoch_ = 0;
     return;
   }
 
   last_used_time_seconds_since_epoch_ = base::saturated_cast<uint32_t>(
       (last_used_time - base::Time::UnixEpoch()).InSeconds());
   // Avoid accidental nullity.
   if (last_used_time_seconds_since_epoch_ == 0)
     last_used_time_seconds_since_epoch_ = 1;
 }
 
 uint32_t EntryMetadata::GetEntrySize() const {
-  return entry_size_;
+  return entry_size_ << 8;
 }
 
 void EntryMetadata::SetEntrySize(base::StrictNumeric<uint32_t> entry_size) {
-  entry_size_ = entry_size;
+  // ### what happens if we overflow here?
+  entry_size_ = (static_cast<uint32_t>(entry_size) + 255) >> 8;
 }
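
With this change entry_size_ stores the size in 256-byte units, rounded up, which frees the low 8 bits of the serialized value for memory_entry_data_. A minimal sketch of the rounding round-trip (helper names below are illustrative only, not part of this patch, and simply mirror the Get/SetEntrySize arithmetic above):

    // Sketch only: illustrative helpers, not part of simple_index.cc.
    #include <cassert>
    #include <cstdint>

    uint32_t PackEntrySize(uint32_t entry_size_bytes) {
      return (entry_size_bytes + 255) >> 8;  // round up to 256-byte units
    }

    uint32_t UnpackEntrySize(uint32_t size_in_units) {
      return size_in_units << 8;  // back to bytes, now a multiple of 256
    }

    void RoundingSketch() {
      assert(UnpackEntrySize(PackEntrySize(1000)) == 1024);  // 4 units
      assert(UnpackEntrySize(PackEntrySize(1024)) == 1024);  // exact multiple
      assert(UnpackEntrySize(PackEntrySize(0)) == 0);
      // Re the "### overflow" question above: if entry_size_bytes is within
      // 255 of UINT32_MAX, the "+ 255" wraps and the packed value comes out
      // tiny, so that edge probably deserves an explicit check or comment.
    }

GetEntrySize() can therefore over-report by up to 255 bytes, which seems acceptable for the approximate accounting the index does.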
 
 void EntryMetadata::Serialize(base::Pickle* pickle) const {
   DCHECK(pickle);
   int64_t internal_last_used_time = GetLastUsedTime().ToInternalValue();
   // If you modify the size of the size of the pickle, be sure to update
   // kOnDiskSizeBytes.
+  uint32_t packed_entry_info = (entry_size_ << 8) | memory_entry_data_;
   pickle->WriteInt64(internal_last_used_time);
-  pickle->WriteUInt64(entry_size_);
+  pickle->WriteUInt64(packed_entry_info);
 }
 
-bool EntryMetadata::Deserialize(base::PickleIterator* it) {
+bool EntryMetadata::Deserialize(base::PickleIterator* it,
+                                bool has_memory_entry_data) {
   DCHECK(it);
   int64_t tmp_last_used_time;
   uint64_t tmp_entry_size;
   if (!it->ReadInt64(&tmp_last_used_time) || !it->ReadUInt64(&tmp_entry_size) ||
       tmp_entry_size > std::numeric_limits<decltype(entry_size_)>::max())
     return false;
   SetLastUsedTime(base::Time::FromInternalValue(tmp_last_used_time));
-  entry_size_ = static_cast<uint32_t>(tmp_entry_size);
+  if (has_memory_entry_data) {
+    // tmp_entry_size actually packs entry_size_ and memory_entry_data_.
+    SetEntrySize(static_cast<uint32_t>(tmp_entry_size & 0xFFFFFF00));
+    SetMemoryEntryData(static_cast<uint8_t>(tmp_entry_size & 0xFF));
+  } else {
+    SetEntrySize(static_cast<uint32_t>(tmp_entry_size));
+    SetMemoryEntryData(0);
+  }
   return true;
 }
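
Spelling out the packed format used by Serialize()/Deserialize() above: the low 32 bits of the written uint64 hold the 256-byte-unit size in bits 8..31 and memory_entry_data_ in bits 0..7; when has_memory_entry_data is false the value is treated as a plain byte count, keeping older index files readable. A minimal sketch of the same split (struct and helper names are illustrative only, not part of this patch):

    // Sketch only: mirrors the masking done in Deserialize() above.
    #include <cstdint>

    struct UnpackedEntryInfo {
      uint32_t entry_size_256b_units;  // bits 8..31 of the packed word
      uint8_t memory_entry_data;       // bits 0..7 of the packed word
    };

    uint32_t PackEntryInfo(uint32_t size_in_256b_units, uint8_t memory_data) {
      return (size_in_256b_units << 8) | memory_data;  // as in Serialize()
    }

    UnpackedEntryInfo UnpackEntryInfo(uint64_t packed) {
      UnpackedEntryInfo out;
      out.entry_size_256b_units =
          static_cast<uint32_t>(packed & 0xFFFFFF00) >> 8;
      out.memory_entry_data = static_cast<uint8_t>(packed & 0xFF);
      return out;
    }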
 
 SimpleIndex::SimpleIndex(
     const scoped_refptr<base::SingleThreadTaskRunner>& io_thread,
     SimpleIndexDelegate* delegate,
     net::CacheType cache_type,
     std::unique_ptr<SimpleIndexFile> index_file)
     : delegate_(delegate),
       cache_type_(cache_type),
(... skipping 148 matching lines ...)
   removed_entries_.insert(entry_hash);
   PostponeWritingToDisk();
 }
 
 bool SimpleIndex::Has(uint64_t hash) const {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   // If not initialized, always return true, forcing it to go to the disk.
   return !initialized_ || entries_set_.count(hash) > 0;
 }
 
+uint8_t SimpleIndex::GetMemoryEntryData(uint64_t entry_hash) const {
+  DCHECK(io_thread_checker_.CalledOnValidThread());
+  EntrySet::const_iterator it = entries_set_.find(entry_hash);
+  if (it == entries_set_.end())
+    return 0;
+  return it->second.GetMemoryEntryData();
+}
+
+void SimpleIndex::SetMemoryEntryData(uint64_t entry_hash, uint8_t value) {
+  DCHECK(io_thread_checker_.CalledOnValidThread());
+  EntrySet::iterator it = entries_set_.find(entry_hash);
+  if (it == entries_set_.end())
+    return;
+  return it->second.SetMemoryEntryData(value);
+}
+
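These two accessors are the whole surface the higher layer sees. A hedged sketch of how a caller (presumably the higher-level HC::T change mentioned in the patch description; the flag value and helper names below are hypothetical, not part of this patch) could use them to turn a known-unusable entry into a quick cache miss:

    // Sketch only: kCantConditionalize and both helpers are hypothetical.
    const uint8_t kCantConditionalize = 1 << 0;

    bool ShouldTreatAsQuickMiss(const SimpleIndex& index, uint64_t entry_hash) {
      // A previous transaction learned this entry can never be conditionalized,
      // so skip the disk open and report a miss immediately.
      return (index.GetMemoryEntryData(entry_hash) & kCantConditionalize) != 0;
    }

    void RememberCantConditionalize(SimpleIndex* index, uint64_t entry_hash) {
      index->SetMemoryEntryData(
          entry_hash,
          static_cast<uint8_t>(index->GetMemoryEntryData(entry_hash) |
                               kCantConditionalize));
    }

Because memory_entry_data_ also travels through Serialize()/Deserialize(), such a hint would survive a restart once the index file is written in the new format.
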
 bool SimpleIndex::UseIfExists(uint64_t entry_hash) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   // Always update the last used time, even if it is during initialization.
   // It will be merged later.
   EntrySet::iterator it = entries_set_.find(entry_hash);
   if (it == entries_set_.end())
     // If not initialized, always return true, forcing it to go to the disk.
     return !initialized_;
   it->second.SetLastUsedTime(base::Time::Now());
   PostponeWritingToDisk();
(... skipping 109 matching lines ...)
       FROM_HERE, base::TimeDelta::FromMilliseconds(delay), write_to_disk_cb_);
 }
 
 void SimpleIndex::UpdateEntryIteratorSize(
     EntrySet::iterator* it,
     base::StrictNumeric<uint32_t> entry_size) {
   // Update the total cache size with the new entry size.
   DCHECK(io_thread_checker_.CalledOnValidThread());
   DCHECK_GE(cache_size_, (*it)->second.GetEntrySize());
   cache_size_ -= (*it)->second.GetEntrySize();
-  cache_size_ += static_cast<uint32_t>(entry_size);
   (*it)->second.SetEntrySize(entry_size);
+  // We use GetEntrySize to get consistent rounding.
+  cache_size_ += (*it)->second.GetEntrySize();
 }
 
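A short worked example of why the code above re-reads GetEntrySize() instead of adding the raw entry_size (sketch only):

    // Sketch only: suppose an entry recorded at 700 bytes is updated to 1000.
    //   old GetEntrySize(): (700  + 255) >> 8 == 3 units -> 3 * 256 == 768
    //   new GetEntrySize(): (1000 + 255) >> 8 == 4 units -> 4 * 256 == 1024
    // cache_size_ changes by 1024 - 768 == +256 here, and a later removal of
    // the entry subtracts the same 1024. Adding the raw 1000 instead would let
    // the running total drift from the rounded values subtracted elsewhere.
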
 void SimpleIndex::MergeInitializingSet(
     std::unique_ptr<SimpleIndexLoadResult> load_result) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   DCHECK(load_result->did_load);
 
   EntrySet* index_file_entries = &load_result->entries;
 
   for (std::unordered_set<uint64_t>::const_iterator it =
(... skipping 89 matching lines ...)
                      start - last_write_to_disk_);
     }
   }
   last_write_to_disk_ = start;
 
   index_file_->WriteToDisk(reason, entries_set_, cache_size_, start,
                            app_on_background_, base::Closure());
 }
 
 }  // namespace disk_cache