OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/disk_cache/backend_impl.h" | 5 #include "net/disk_cache/v3/backend_impl_v3.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/bind_helpers.h" | 8 #include "base/bind_helpers.h" |
9 #include "base/file_util.h" | 9 #include "base/file_util.h" |
10 #include "base/files/file_path.h" | 10 #include "base/files/file_path.h" |
11 #include "base/hash.h" | 11 #include "base/hash.h" |
12 #include "base/message_loop/message_loop.h" | 12 #include "base/message_loop/message_loop.h" |
13 #include "base/metrics/field_trial.h" | 13 #include "base/metrics/field_trial.h" |
14 #include "base/metrics/histogram.h" | 14 #include "base/metrics/histogram.h" |
15 #include "base/metrics/stats_counters.h" | 15 #include "base/metrics/stats_counters.h" |
16 #include "base/rand_util.h" | 16 #include "base/rand_util.h" |
17 #include "base/strings/string_util.h" | 17 #include "base/strings/string_util.h" |
18 #include "base/strings/stringprintf.h" | 18 #include "base/strings/stringprintf.h" |
19 #include "base/sys_info.h" | 19 #include "base/sys_info.h" |
20 #include "base/threading/thread_restrictions.h" | 20 #include "base/threading/thread_restrictions.h" |
| 21 #include "base/threading/worker_pool.h" |
21 #include "base/time/time.h" | 22 #include "base/time/time.h" |
22 #include "base/timer/timer.h" | 23 #include "base/timer/timer.h" |
23 #include "net/base/net_errors.h" | 24 #include "net/base/net_errors.h" |
24 #include "net/disk_cache/cache_util.h" | 25 #include "net/base/io_buffer.h" |
25 #include "net/disk_cache/entry_impl.h" | |
26 #include "net/disk_cache/errors.h" | 26 #include "net/disk_cache/errors.h" |
27 #include "net/disk_cache/experiments.h" | 27 #include "net/disk_cache/experiments.h" |
28 #include "net/disk_cache/file.h" | 28 #include "net/disk_cache/file.h" |
| 29 #include "net/disk_cache/storage_block-inl.h" |
| 30 #include "net/disk_cache/v3/backend_worker.h" |
| 31 #include "net/disk_cache/v3/backend_work_item.h" |
| 32 #include "net/disk_cache/v3/disk_format_v3.h" |
| 33 #include "net/disk_cache/v3/entry_impl_v3.h" |
| 34 #include "net/disk_cache/v3/index_table.h" |
29 | 35 |
30 // This has to be defined before including histogram_macros.h from this file. | 36 // This has to be defined before including histogram_macros.h from this file. |
31 #define NET_DISK_CACHE_BACKEND_IMPL_CC_ | 37 #define NET_DISK_CACHE_BACKEND_IMPL_CC_ |
32 #include "net/disk_cache/histogram_macros.h" | 38 #include "net/disk_cache/histogram_macros.h" |
33 | 39 |
34 using base::Time; | 40 using base::Time; |
35 using base::TimeDelta; | 41 using base::TimeDelta; |
36 using base::TimeTicks; | 42 using base::TimeTicks; |
37 | 43 |
38 namespace { | 44 namespace { |
39 | 45 |
40 const char* kIndexName = "index"; | |
41 | |
43 // Seems like ~240 MB correspond to less than 50k entries for 99% of the people. | 46 // Seems like ~240 MB corresponds to less than 50k entries for 99% of users. |
43 // Note that the actual target is to keep the index table load factor under 55% | 47 // Note that the actual target is to keep the index table load factor under 55% |
44 // for most users. | 48 // for most users. |
45 const int k64kEntriesStore = 240 * 1000 * 1000; | 49 const int k64kEntriesStore = 240 * 1000 * 1000; |
46 const int kBaseTableLen = 64 * 1024; | 50 const int kBaseTableLen = 64 * 1024; |
47 const int kDefaultCacheSize = 80 * 1024 * 1024; | 51 const int kDefaultCacheSize = 80 * 1024 * 1024; |
48 | 52 |
49 // Avoid trimming the cache for the first 5 minutes (10 timer ticks). | 53 // Avoid trimming the cache for the first 5 minutes (10 timer ticks). |
50 const int kTrimDelay = 10; | 54 const int kTrimDelay = 10; |
| 55 const int kTimerSeconds = 30; |
| 56 |
| 57 const size_t kMaxKeySize = 64 * 1024; |
51 | 58 |
52 int DesiredIndexTableLen(int32 storage_size) { | 59 int DesiredIndexTableLen(int32 storage_size) { |
53 if (storage_size <= k64kEntriesStore) | 60 if (storage_size <= k64kEntriesStore) |
54 return kBaseTableLen; | 61 return kBaseTableLen; |
55 if (storage_size <= k64kEntriesStore * 2) | 62 if (storage_size <= k64kEntriesStore * 2) |
56 return kBaseTableLen * 2; | 63 return kBaseTableLen * 2; |
57 if (storage_size <= k64kEntriesStore * 4) | 64 if (storage_size <= k64kEntriesStore * 4) |
58 return kBaseTableLen * 4; | 65 return kBaseTableLen * 4; |
59 if (storage_size <= k64kEntriesStore * 8) | 66 if (storage_size <= k64kEntriesStore * 8) |
60 return kBaseTableLen * 8; | 67 return kBaseTableLen * 8; |
61 | 68 |
62 // The biggest storage_size for int32 requires a 4 MB table. | 69 // The biggest storage_size for int32 requires a 4 MB table. |
63 return kBaseTableLen * 16; | 70 return kBaseTableLen * 16; |
64 } | 71 } |
65 | 72 |
66 int MaxStorageSizeForTable(int table_len) { | 73 int MaxStorageSizeForTable(int table_len) { |
67 return table_len * (k64kEntriesStore / kBaseTableLen); | 74 return table_len * (k64kEntriesStore / kBaseTableLen); |
68 } | 75 } |
69 | 76 |
70 size_t GetIndexSize(int table_len) { | 77 size_t GetIndexBitmapSize(int table_len) { |
71 size_t table_size = sizeof(disk_cache::CacheAddr) * table_len; | 78 DCHECK_LT(table_len, 1 << 22); |
72 return sizeof(disk_cache::IndexHeader) + table_size; | 79 size_t base_bits = disk_cache::kBaseBitmapBytes * 8; |
| 80 if (table_len < static_cast<int>(base_bits)) |
| 81 return sizeof(disk_cache::IndexBitmap); |
| 82 |
| 83 size_t num_pages = (table_len / 8) - disk_cache::kBaseBitmapBytes; |
| 84 num_pages = (num_pages + 4095) / 4096; |
| 85 return sizeof(disk_cache::IndexHeaderV3) + num_pages * 4096; |
73 } | 86 } |
74 | 87 |
75 } // namespace | 88 } // namespace |
76 | 89 |
77 // ------------------------------------------------------------------------ | 90 // ------------------------------------------------------------------------ |
78 | 91 |
79 namespace disk_cache { | 92 namespace disk_cache { |
80 | 93 |
81 BackendImpl::BackendImpl(const base::FilePath& path, | 94 // Exported by disk_cache/backend_impl.cc |
82 base::MessageLoopProxy* cache_thread, | 95 // Returns the preferred max cache size given the available disk space. |
83 net::NetLog* net_log) | 96 NET_EXPORT_PRIVATE int PreferedCacheSize(int64 available); |
84 : background_queue_(this, cache_thread), | 97 |
| 98 BackendImplV3::BackendImplV3(const base::FilePath& path, |
| 99 base::MessageLoopProxy* cache_thread, |
| 100 net::NetLog* net_log) |
| 101 : index_(this), |
85 path_(path), | 102 path_(path), |
86 block_files_(path), | 103 block_files_(this), |
87 mask_(0), | |
88 max_size_(0), | 104 max_size_(0), |
89 up_ticks_(0), | 105 up_ticks_(0), |
| 106 test_seconds_(0), |
90 cache_type_(net::DISK_CACHE), | 107 cache_type_(net::DISK_CACHE), |
91 uma_report_(0), | 108 uma_report_(0), |
92 user_flags_(0), | 109 user_flags_(0), |
93 init_(false), | 110 init_(false), |
94 restarted_(false), | 111 restarted_(false), |
95 unit_test_(false), | |
96 read_only_(false), | 112 read_only_(false), |
97 disabled_(false), | 113 disabled_(false), |
98 new_eviction_(false), | 114 lru_eviction_(true), |
99 first_timer_(true), | 115 first_timer_(true), |
100 user_load_(false), | 116 user_load_(false), |
| 117 growing_index_(false), |
| 118 growing_files_(false), |
101 net_log_(net_log), | 119 net_log_(net_log), |
102 done_(true, false), | 120 cache_thread_(cache_thread), |
103 ptr_factory_(this) { | 121 ptr_factory_(this) { |
104 } | 122 } |
105 | 123 |
106 BackendImpl::BackendImpl(const base::FilePath& path, | 124 BackendImplV3::~BackendImplV3() { |
107 uint32 mask, | 125 CleanupCache(); |
108 base::MessageLoopProxy* cache_thread, | |
109 net::NetLog* net_log) | |
110 : background_queue_(this, cache_thread), | |
111 path_(path), | |
112 block_files_(path), | |
113 mask_(mask), | |
114 max_size_(0), | |
115 up_ticks_(0), | |
116 cache_type_(net::DISK_CACHE), | |
117 uma_report_(0), | |
118 user_flags_(kMask), | |
119 init_(false), | |
120 restarted_(false), | |
121 unit_test_(false), | |
122 read_only_(false), | |
123 disabled_(false), | |
124 new_eviction_(false), | |
125 first_timer_(true), | |
126 user_load_(false), | |
127 net_log_(net_log), | |
128 done_(true, false), | |
129 ptr_factory_(this) { | |
130 } | 126 } |
131 | 127 |
132 BackendImpl::~BackendImpl() { | 128 int BackendImplV3::Init(const CompletionCallback& callback) { |
133 if (user_flags_ & kNoRandom) { | 129 DCHECK(!init_); |
134 // This is a unit test, so we want to be strict about not leaking entries | 130 if (init_) |
135 // and completing all the work. | 131 return net::ERR_FAILED; |
136 background_queue_.WaitForPendingIO(); | |
137 } else { | |
138 // This is most likely not a test, so we want to do as little work as | |
139 // possible at this time, at the price of leaving dirty entries behind. | |
140 background_queue_.DropPendingIO(); | |
141 } | |
142 | 132 |
143 if (background_queue_.BackgroundIsCurrentThread()) { | 133 worker_ = new Worker(path_, base::MessageLoopProxy::current()); |
144 // Unit tests may use the same thread for everything. | 134 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_INIT); |
145 CleanupCache(); | 135 work_item->set_user_callback(callback); |
146 } else { | 136 work_item->set_flags(user_flags_); |
147 background_queue_.background_thread()->PostTask( | 137 PostWorkItem(work_item); |
148 FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this))); | |
149 // http://crbug.com/74623 | |
150 base::ThreadRestrictions::ScopedAllowWait allow_wait; | |
151 done_.Wait(); | |
152 } | |
153 } | |
154 | 138 |
155 int BackendImpl::Init(const CompletionCallback& callback) { | |
156 background_queue_.Init(callback); | |
157 return net::ERR_IO_PENDING; | 139 return net::ERR_IO_PENDING; |
158 } | 140 } |
159 | 141 |
160 // ------------------------------------------------------------------------ | 142 // ------------------------------------------------------------------------ |
161 | 143 |
162 int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry, | 144 int BackendImplV3::OpenPrevEntry(void** iter, Entry** prev_entry, |
163 const CompletionCallback& callback) { | 145 const CompletionCallback& callback) { |
164 DCHECK(!callback.is_null()); | 146 DCHECK(!callback.is_null()); |
165 background_queue_.OpenPrevEntry(iter, prev_entry, callback); | 147 return OpenFollowingEntry(true, iter, prev_entry, callback); |
166 return net::ERR_IO_PENDING; | |
167 } | 148 } |
168 | 149 |
169 bool BackendImpl::SetMaxSize(int max_bytes) { | 150 bool BackendImplV3::SetMaxSize(int max_bytes) { |
170 COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); | 151 COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); |
171 if (max_bytes < 0) | 152 if (max_bytes < 0) |
172 return false; | 153 return false; |
173 | 154 |
174 // Zero size means use the default. | 155 // Zero size means use the default. |
175 if (!max_bytes) | 156 if (!max_bytes) |
176 return true; | 157 return true; |
177 | 158 |
178 // Avoid a DCHECK later on. | 159 // Avoid a DCHECK later on. |
179 if (max_bytes >= kint32max - kint32max / 10) | 160 if (max_bytes >= kint32max - kint32max / 10) |
180 max_bytes = kint32max - kint32max / 10 - 1; | 161 max_bytes = kint32max - kint32max / 10 - 1; |
181 | 162 |
182 user_flags_ |= kMaxSize; | 163 user_flags_ |= MAX_SIZE; |
183 max_size_ = max_bytes; | 164 max_size_ = max_bytes; |
184 return true; | 165 return true; |
185 } | 166 } |
186 | 167 |
187 void BackendImpl::SetType(net::CacheType type) { | 168 void BackendImplV3::SetType(net::CacheType type) { |
188 DCHECK_NE(net::MEMORY_CACHE, type); | 169 DCHECK_NE(net::MEMORY_CACHE, type); |
189 cache_type_ = type; | 170 cache_type_ = type; |
190 } | 171 } |
191 | 172 |
192 bool BackendImpl::CreateBlock(FileType block_type, int block_count, | 173 bool BackendImplV3::CreateBlock(FileType block_type, int block_count, |
193 Addr* block_address) { | 174 Addr* block_address) { |
194 return block_files_.CreateBlock(block_type, block_count, block_address); | 175 return block_files_.CreateBlock(block_type, block_count, block_address); |
195 } | 176 } |
196 | 177 |
197 void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) { | 178 void BackendImplV3::UpdateRank(EntryImplV3* entry, bool modified) { |
198 if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE)) | 179 if (!modified && (cache_type() == net::SHADER_CACHE || read_only_)) |
199 return; | 180 return; |
200 eviction_.UpdateRank(entry, modified); | 181 |
201 } | 182 index_.UpdateTime(entry->GetHash(), entry->GetAddress(), GetCurrentTime()); |
202 | 183 } |
203 void BackendImpl::InternalDoomEntry(EntryImpl* entry) { | 184 |
| 185 void BackendImplV3::InternalDoomEntry(EntryImplV3* entry) { |
204 uint32 hash = entry->GetHash(); | 186 uint32 hash = entry->GetHash(); |
205 std::string key = entry->GetKey(); | 187 std::string key = entry->GetKey(); |
206 Addr entry_addr = entry->entry()->address(); | 188 Addr entry_addr = entry->GetAddress(); |
207 bool error; | 189 |
208 EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error); | |
209 CacheAddr child(entry->GetNextAddress()); | |
210 | |
211 Trace("Doom entry 0x%p", entry); | 190 Trace("Doom entry 0x%p", entry); |
212 | 191 |
213 if (!entry->doomed()) { | 192 index_.SetSate(hash, entry_addr, ENTRY_DELETED); |
214 // We may have doomed this entry from within MatchEntry. | 193 |
215 eviction_.OnDoomEntry(entry); | 194 // The entry is transitioning from open to doomed. |
216 entry->InternalDoom(); | 195 doomed_entries_[entry_addr.value()] = entry; |
217 if (!new_eviction_) { | 196 EntriesMap::iterator it = open_entries_.find(entry_addr.value()); |
218 DecreaseNumEntries(); | 197 if (it != open_entries_.end()) |
| 198 open_entries_.erase(it); |
| 199 else |
| 200 NOTREACHED(); |
| 201 |
| 202 entry->InternalDoom(); |
| 203 DecreaseNumEntries(); |
| 204 } |
| 205 |
| 206 bool BackendImplV3::ShouldDeleteNow(EntryImplV3* entry) { |
| 207 Addr entry_addr = entry->GetAddress(); |
| 208 DCHECK(doomed_entries_.count(entry_addr.value())); |
| 209 EntriesMap::iterator it = entries_to_delete_.find(entry_addr.value()); |
| 210 if (it == entries_to_delete_.end()) { |
| 211 // Delay deletion until the next backup cycle. |
| 212 entries_to_delete_[entry_addr.value()] = entry; |
| 213 entry->AddRef(); |
| 214 |
| 215 // The entry was ready to be deleted. By opening it again we make sure |
| 216 // we'll go again through the normal Close() logic later on, and we'll have |
| 217 // a second chance to allow deletion. |
| 218 entry->OnOpenEntry(); |
| 219 return false; |
| 220 } |
| 221 |
| 222 entries_to_delete_.erase(it); |
| 223 return true; |
| 224 } |
| 225 |
| 226 void BackendImplV3::OnEntryCleanup(EntryImplV3* entry) { |
| 227 // An entry may be going away pretty soon (as soon as all pending IO is done). |
| 228 // Grab an extra reference so that the entry is alive for a little longer and |
| 229 // we may reuse it directly. |
| 230 if (recent_entries_.insert(entry).second) |
| 231 entry->AddRef(); |
| 232 } |
| 233 |
| 234 void BackendImplV3::OnEntryDestroyBegin(Addr address) { |
| 235 if (disabled_) |
| 236 return; |
| 237 EntriesMap::iterator it = open_entries_.find(address.value()); |
| 238 if (it != open_entries_.end()) { |
| 239 index_.SetSate(it->second->GetHash(), address, ENTRY_USED); |
| 240 open_entries_.erase(it); |
| 241 } else { |
| 242 it = doomed_entries_.find(address.value()); |
| 243 if (it != doomed_entries_.end()) { |
| 244 // All data is gone. Wait for the next backup cycle before releasing the |
| 245 // cell itself. |
| 246 CellInfo cell_info = { it->second->GetHash(), address }; |
| 247 deleted_entries_.push_back(cell_info); |
| 248 doomed_entries_.erase(it); |
219 } | 249 } |
220 stats_.OnEvent(Stats::DOOM_ENTRY); | 250 } |
221 } | 251 } |
222 | 252 |
223 if (parent_entry) { | 253 void BackendImplV3::OnEntryDestroyEnd() { |
224 parent_entry->SetNextAddress(Addr(child)); | 254 DecreaseNumRefs(); |
225 parent_entry->Release(); | 255 if (disabled_) |
226 } else if (!error) { | 256 return; |
227 data_->table[hash & mask_] = child; | 257 if (index_.header()->num_bytes > max_size_ && !read_only_ && |
228 } | 258 (up_ticks_ > kTrimDelay || user_flags_ & BASIC_UNIT_TEST)) { |
229 | 259 eviction_.TrimCache(); |
230 FlushIndex(); | 260 } |
231 } | 261 } |
232 | 262 |
233 void BackendImpl::OnEntryDestroyBegin(Addr address) { | 263 void BackendImplV3::OnEntryModified(EntryImplV3* entry) { |
| 264 index_.SetSate(entry->GetHash(), entry->GetAddress(), ENTRY_MODIFIED); |
| 265 } |
| 266 |
| 267 void BackendImplV3::ReadData(EntryImplV3* entry, Addr address, int offset, |
| 268 net::IOBuffer* buffer, int buffer_len, |
| 269 const CompletionCallback& callback) { |
| 270 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_READ_DATA); |
| 271 work_item->set_buffer(buffer); |
| 272 work_item->set_buffer_len(buffer_len); |
| 273 work_item->set_address(address); |
| 274 work_item->set_offset(offset); |
| 275 work_item->set_user_callback(callback); |
| 276 if (entry) |
| 277 work_item->set_owner_entry(entry); |
| 278 |
| 279 PostWorkItem(work_item); |
| 280 } |
| 281 |
| 282 void BackendImplV3::WriteData(EntryImplV3* entry, Addr address, int offset, |
| 283 net::IOBuffer* buffer, int buffer_len, |
| 284 const CompletionCallback& callback) { |
| 285 if (!buffer_len) { |
| 286 DCHECK(callback.is_null()); |
| 287 return; |
| 288 } |
| 289 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_WRITE_DATA); |
| 290 work_item->set_buffer(buffer); |
| 291 work_item->set_buffer_len(buffer_len); |
| 292 work_item->set_address(address); |
| 293 work_item->set_offset(offset); |
| 294 work_item->set_user_callback(callback); |
| 295 work_item->set_owner_entry(entry); |
| 296 PostWorkItem(work_item); |
| 297 } |
| 298 |
| 299 void BackendImplV3::MoveData(EntryImplV3* entry, Addr source, |
| 300 Addr destination, int len, |
| 301 const CompletionCallback& callback) { |
| 302 DCHECK(len); |
| 303 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_MOVE_DATA); |
| 304 work_item->set_buffer_len(len); |
| 305 work_item->set_address(source); |
| 306 work_item->set_address2(destination); |
| 307 work_item->set_user_callback(callback); |
| 308 work_item->set_owner_entry(entry); |
 | 309 PostWorkItem(work_item);  // TODO: also delete the source block once the move completes. |
| 310 } |
| 311 |
| 312 void BackendImplV3::Truncate(EntryImplV3* entry, Addr address, int offset) { |
| 313 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_TRUNCATE); |
| 314 work_item->set_address(address); |
| 315 work_item->set_offset(offset); |
| 316 work_item->set_owner_entry(entry); |
| 317 PostWorkItem(work_item); |
| 318 } |
| 319 |
| 320 void BackendImplV3::Delete(EntryImplV3* entry, Addr address) { |
| 321 if (disabled_) |
| 322 return; |
| 323 if (address.is_separate_file()) { |
| 324 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_DELETE); |
| 325 work_item->set_address(address); |
| 326 work_item->set_owner_entry(entry); |
| 327 PostWorkItem(work_item); |
| 328 |
| 329 // And now delete the block itself. |
| 330 address = address.AsBlockFile(); |
| 331 } |
| 332 |
| 333 int size = Addr::BlockSizeForFileType(address.file_type()); |
| 334 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size)); |
| 335 memset(buffer->data(), 0, size); |
| 336 WriteData(entry, address, 0, buffer, size, net::CompletionCallback()); |
| 337 |
| 338 block_files_.DeleteBlock(address); |
| 339 } |
| 340 |
| 341 void BackendImplV3::Close(EntryImplV3* entry, Addr address) { |
| 342 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLOSE); |
| 343 work_item->set_address(address); |
| 344 work_item->set_owner_entry(entry); |
| 345 PostWorkItem(work_item); |
| 346 } |
| 347 |
| 348 bool BackendImplV3::EvictEntry(uint32 hash, Addr address) { |
234 EntriesMap::iterator it = open_entries_.find(address.value()); | 349 EntriesMap::iterator it = open_entries_.find(address.value()); |
235 if (it != open_entries_.end()) | 350 if (it != open_entries_.end()) |
236 open_entries_.erase(it); | 351 return false; |
237 } | 352 |
238 | 353 EntryCell old_cell = index_.FindEntryCell(hash, address); |
239 void BackendImpl::OnEntryDestroyEnd() { | 354 if (!old_cell.IsValid() || old_cell.GetState() != ENTRY_USED) |
240 DecreaseNumRefs(); | 355 return false; |
241 if (data_->header.num_bytes > max_size_ && !read_only_ && | 356 |
242 (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom)) | 357 EntrySet entries; |
243 eviction_.TrimCache(false); | 358 entries.cells.push_back(old_cell); |
244 } | 359 |
245 | 360 uint32 flags = WorkItem::WORK_FOR_EVICT; |
246 EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const { | 361 if (lru_eviction_) { |
247 DCHECK(rankings->HasData()); | 362 flags |= WorkItem::WORK_NO_COPY; |
248 EntriesMap::const_iterator it = | 363 } else { |
249 open_entries_.find(rankings->Data()->contents); | 364 Addr new_address; |
| 365 if (!block_files_.CreateBlock(BLOCK_EVICTED, 1, &new_address)) |
| 366 return false; |
| 367 |
| 368 EntryCell new_cell = index_.CreateEntryCell(hash, new_address); |
| 369 if (!new_cell.IsValid()) { |
| 370 block_files_.DeleteBlock(new_address); |
| 371 return false; |
| 372 } |
| 373 entries.cells.push_back(new_cell); |
| 374 } |
| 375 |
| 376 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY); |
| 377 work_item->set_flags(flags); |
| 378 work_item->set_entries(entries); |
| 379 PostWorkItem(work_item); |
| 380 |
| 381 return true; |
| 382 } |
| 383 |
| 384 EntryImplV3* BackendImplV3::GetOpenEntry(Addr address) const { |
| 385 EntriesMap::const_iterator it = open_entries_.find(address.value()); |
250 if (it != open_entries_.end()) { | 386 if (it != open_entries_.end()) { |
251 // We have this entry in memory. | 387 // We have this entry in memory. |
| 388 it->second->AddRef(); |
| 389 it->second->OnOpenEntry(); |
252 return it->second; | 390 return it->second; |
253 } | 391 } |
254 | 392 |
255 return NULL; | 393 return NULL; |
256 } | 394 } |
257 | 395 |
258 int BackendImpl::MaxFileSize() const { | 396 int BackendImplV3::MaxFileSize() const { |
259 return max_size_ / 8; | 397 return max_size_ / 8; |
260 } | 398 } |
261 | 399 |
262 void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) { | 400 void BackendImplV3::ModifyStorageSize(int32 old_size, int32 new_size) { |
263 if (disabled_ || old_size == new_size) | 401 if (disabled_ || old_size == new_size) |
264 return; | 402 return; |
265 if (old_size > new_size) | 403 if (old_size > new_size) |
266 SubstractStorageSize(old_size - new_size); | 404 SubstractStorageSize(old_size - new_size); |
267 else | 405 else |
268 AddStorageSize(new_size - old_size); | 406 AddStorageSize(new_size - old_size); |
269 | 407 |
270 FlushIndex(); | |
271 | |
272 // Update the usage statistics. | 408 // Update the usage statistics. |
273 stats_.ModifyStorageStats(old_size, new_size); | 409 stats_.ModifyStorageStats(old_size, new_size); |
274 } | 410 } |
275 | 411 |
276 void BackendImpl::TooMuchStorageRequested(int32 size) { | 412 void BackendImplV3::TooMuchStorageRequested(int32 size) { |
277 stats_.ModifyStorageStats(0, size); | 413 stats_.ModifyStorageStats(0, size); |
278 } | 414 } |
279 | 415 |
280 bool BackendImpl::IsAllocAllowed(int current_size, int new_size) { | 416 bool BackendImplV3::IsAllocAllowed(int current_size, int new_size, bool force) { |
281 DCHECK_GT(new_size, current_size); | 417 DCHECK_GT(new_size, current_size); |
282 if (user_flags_ & kNoBuffering) | 418 if (!force && (user_flags_ & NO_BUFFERING)) |
283 return false; | 419 return false; |
284 | 420 |
285 int to_add = new_size - current_size; | 421 int to_add = new_size - current_size; |
286 if (buffer_bytes_ + to_add > MaxBuffersSize()) | 422 if (!force && (buffer_bytes_ + to_add > MaxBuffersSize())) |
287 return false; | 423 return false; |
288 | 424 |
289 buffer_bytes_ += to_add; | 425 buffer_bytes_ += to_add; |
290 CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024); | 426 CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024); |
291 return true; | 427 return true; |
292 } | 428 } |
293 | 429 |
294 void BackendImpl::BufferDeleted(int size) { | 430 void BackendImplV3::BufferDeleted(int size) { |
| 431 DCHECK_GE(size, 0); |
295 buffer_bytes_ -= size; | 432 buffer_bytes_ -= size; |
296 DCHECK_GE(size, 0); | 433 DCHECK_GE(buffer_bytes_, 0); |
297 } | 434 } |
298 | 435 |
299 bool BackendImpl::IsLoaded() const { | 436 bool BackendImplV3::IsLoaded() const { |
300 CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_); | 437 if (user_flags_ & NO_LOAD_PROTECTION) |
301 if (user_flags_ & kNoLoadProtection) | |
302 return false; | 438 return false; |
303 | 439 |
304 return (num_pending_io_ > 5 || user_load_); | 440 return user_load_; |
305 } | 441 } |
306 | 442 |
307 std::string BackendImpl::HistogramName(const char* name, int experiment) const { | 443 base::Time BackendImplV3::GetCurrentTime() const { |
| 444 Time base_time = Time::Now(); |
| 445 if (!test_seconds_) |
| 446 return base_time; |
| 447 |
| 448 return base_time + TimeDelta::FromSeconds(test_seconds_); |
| 449 } |
| 450 |
| 451 std::string BackendImplV3::HistogramName(const char* name, |
| 452 int experiment) const { |
| 453 static const char* names[] = { "Http", "", "Media", "AppCache", "Shader" }; |
| 454 DCHECK_NE(cache_type_, net::MEMORY_CACHE); |
308 if (!experiment) | 455 if (!experiment) |
309 return base::StringPrintf("DiskCache.%d.%s", cache_type_, name); | 456 return base::StringPrintf("DiskCache3.%s.%s", names[cache_type_], name); |
310 return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_, | 457 return base::StringPrintf("DiskCache3.%s.%s_%d", names[cache_type_], |
311 name, experiment); | 458 name, experiment); |
312 } | 459 } |
313 | 460 |
314 base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() { | 461 base::WeakPtr<BackendImplV3> BackendImplV3::GetWeakPtr() { |
315 return ptr_factory_.GetWeakPtr(); | 462 return ptr_factory_.GetWeakPtr(); |
316 } | 463 } |
317 | 464 |
318 // We want to remove biases from some histograms so we only send data once per | 465 // We want to remove biases from some histograms so we only send data once per |
319 // week. | 466 // week. |
320 bool BackendImpl::ShouldReportAgain() { | 467 bool BackendImplV3::ShouldReportAgain() { |
321 if (uma_report_) | 468 if (uma_report_) |
322 return uma_report_ == 2; | 469 return uma_report_ == 2; |
323 | 470 |
324 uma_report_++; | 471 uma_report_++; |
325 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); | 472 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); |
326 Time last_time = Time::FromInternalValue(last_report); | 473 Time last_time = Time::FromInternalValue(last_report); |
327 if (!last_report || (Time::Now() - last_time).InDays() >= 7) { | 474 if (!last_report || (GetCurrentTime() - last_time).InDays() >= 7) { |
328 stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue()); | 475 stats_.SetCounter(Stats::LAST_REPORT, GetCurrentTime().ToInternalValue()); |
329 uma_report_++; | 476 uma_report_++; |
330 return true; | 477 return true; |
331 } | 478 } |
332 return false; | 479 return false; |
333 } | 480 } |
334 | 481 |
335 void BackendImpl::FirstEviction() { | 482 void BackendImplV3::FirstEviction() { |
336 DCHECK(data_->header.create_time); | 483 IndexHeaderV3* header = index_.header(); |
| 484 header->flags |= CACHE_EVICTED; |
| 485 DCHECK(header->create_time); |
337 if (!GetEntryCount()) | 486 if (!GetEntryCount()) |
338 return; // This is just for unit tests. | 487 return; // This is just for unit tests. |
339 | 488 |
340 Time create_time = Time::FromInternalValue(data_->header.create_time); | 489 Time create_time = Time::FromInternalValue(header->create_time); |
341 CACHE_UMA(AGE, "FillupAge", 0, create_time); | 490 CACHE_UMA(AGE, "FillupAge", 0, create_time); |
342 | 491 |
343 int64 use_time = stats_.GetCounter(Stats::TIMER); | 492 int64 use_time = stats_.GetCounter(Stats::TIMER); |
344 CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120)); | 493 CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120)); |
345 CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio()); | 494 CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio()); |
346 | 495 |
347 if (!use_time) | 496 if (!use_time) |
348 use_time = 1; | 497 use_time = 1; |
349 CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0, | 498 CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0, |
350 static_cast<int>(data_->header.num_entries / use_time)); | 499 static_cast<int>(header->num_entries / use_time)); |
351 CACHE_UMA(COUNTS, "FirstByteIORate", 0, | 500 CACHE_UMA(COUNTS, "FirstByteIORate", 0, |
352 static_cast<int>((data_->header.num_bytes / 1024) / use_time)); | 501 static_cast<int>((header->num_bytes / 1024) / use_time)); |
353 | 502 |
354 int avg_size = data_->header.num_bytes / GetEntryCount(); | 503 int avg_size = header->num_bytes / GetEntryCount(); |
355 CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size); | 504 CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size); |
356 | 505 |
357 int large_entries_bytes = stats_.GetLargeEntriesSize(); | 506 int large_entries_bytes = stats_.GetLargeEntriesSize(); |
358 int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes; | 507 int large_ratio = large_entries_bytes * 100 / header->num_bytes; |
359 CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio); | 508 CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio); |
360 | 509 |
361 if (new_eviction_) { | 510 if (!lru_eviction_) { |
362 CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio()); | 511 CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio()); |
363 CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0, | 512 CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0, |
364 data_->header.lru.sizes[0] * 100 / data_->header.num_entries); | 513 header->num_no_use_entries * 100 / header->num_entries); |
365 CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0, | 514 CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0, |
366 data_->header.lru.sizes[1] * 100 / data_->header.num_entries); | 515 header->num_low_use_entries * 100 / header->num_entries); |
367 CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0, | 516 CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0, |
368 data_->header.lru.sizes[2] * 100 / data_->header.num_entries); | 517 header->num_high_use_entries * 100 / header->num_entries); |
369 } | 518 } |
370 | 519 |
371 stats_.ResetRatios(); | 520 stats_.ResetRatios(); |
372 } | 521 } |
373 | 522 |
374 void BackendImpl::OnEvent(Stats::Counters an_event) { | 523 void BackendImplV3::OnEvent(Stats::Counters an_event) { |
375 stats_.OnEvent(an_event); | 524 stats_.OnEvent(an_event); |
376 } | 525 } |
377 | 526 |
378 void BackendImpl::OnRead(int32 bytes) { | 527 void BackendImplV3::OnRead(int32 bytes) { |
379 DCHECK_GE(bytes, 0); | 528 DCHECK_GE(bytes, 0); |
380 byte_count_ += bytes; | 529 byte_count_ += bytes; |
381 if (byte_count_ < 0) | 530 if (byte_count_ < 0) |
382 byte_count_ = kint32max; | 531 byte_count_ = kint32max; |
383 } | 532 } |
384 | 533 |
385 void BackendImpl::OnWrite(int32 bytes) { | 534 void BackendImplV3::OnWrite(int32 bytes) { |
386 // We use the same implementation as OnRead... just log the number of bytes. | 535 // We use the same implementation as OnRead... just log the number of bytes. |
387 OnRead(bytes); | 536 OnRead(bytes); |
388 } | 537 } |
389 | 538 |
390 void BackendImpl::OnStatsTimer() { | 539 void BackendImplV3::GrowBlockFiles() { |
| 540 if (growing_files_ || disabled_) |
| 541 return; |
| 542 growing_files_ = true; |
| 543 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_GROW_FILES); |
| 544 PostWorkItem(work_item); |
| 545 } |
| 546 |
| 547 void BackendImplV3::OnTimerTick() { |
| 548 if (disabled_) |
| 549 return; |
| 550 |
391 stats_.OnEvent(Stats::TIMER); | 551 stats_.OnEvent(Stats::TIMER); |
392 int64 time = stats_.GetCounter(Stats::TIMER); | 552 int64 time = stats_.GetCounter(Stats::TIMER); |
393 int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES); | 553 int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES); |
394 | 554 |
395 // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding | 555 // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding |
396 // the bias towards 0. | 556 // the bias towards 0. |
397 if (num_refs_ && (current != num_refs_)) { | 557 if (num_refs_ && (current != num_refs_)) { |
398 int64 diff = (num_refs_ - current) / 50; | 558 int64 diff = (num_refs_ - current) / 50; |
399 if (!diff) | 559 if (!diff) |
400 diff = num_refs_ > current ? 1 : -1; | 560 diff = num_refs_ > current ? 1 : -1; |
401 current = current + diff; | 561 current = current + diff; |
402 stats_.SetCounter(Stats::OPEN_ENTRIES, current); | 562 stats_.SetCounter(Stats::OPEN_ENTRIES, current); |
403 stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_); | 563 stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_); |
404 } | 564 } |
405 | 565 |
406 CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_); | 566 CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_); |
407 | 567 |
408 CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_); | 568 CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_); |
409 CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024); | 569 CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024); |
410 | 570 |
411 // These values cover about 99.5% of the population (Oct 2011). | 571 // These values cover about 99.5% of the population (Oct 2011). |
412 user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024); | 572 user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024); |
413 entry_count_ = 0; | 573 entry_count_ = 0; |
414 byte_count_ = 0; | 574 byte_count_ = 0; |
415 up_ticks_++; | 575 up_ticks_++; |
416 | 576 |
417 if (!data_) | |
418 first_timer_ = false; | |
419 if (first_timer_) { | 577 if (first_timer_) { |
420 first_timer_ = false; | 578 first_timer_ = false; |
421 if (ShouldReportAgain()) | 579 if (ShouldReportAgain()) |
422 ReportStats(); | 580 ReportStats(); |
423 } | 581 } |
424 | 582 |
| 583 index_.OnBackupTimer(); |
| 584 CloseDoomedEntries(); |
| 585 ReleaseRecentEntries(); |
| 586 UpdateDeletedEntries(); |
| 587 |
425 // Save stats to disk at 5 min intervals. | 588 // Save stats to disk at 5 min intervals. |
426 if (time % 10 == 0) | 589 if (time % 10 == 0) |
427 StoreStats(); | 590 StoreStats(); |
428 } | 591 } |
429 | 592 |
430 void BackendImpl::SetUnitTestMode() { | 593 void BackendImplV3::SetUnitTestMode() { |
431 user_flags_ |= kUnitTestMode; | 594 user_flags_ |= UNIT_TEST_MODE; |
432 unit_test_ = true; | |
433 } | 595 } |
434 | 596 |
435 void BackendImpl::SetUpgradeMode() { | 597 void BackendImplV3::SetUpgradeMode() { |
436 user_flags_ |= kUpgradeMode; | 598 user_flags_ |= UPGRADE_MODE; |
437 read_only_ = true; | 599 read_only_ = true; |
438 } | 600 } |
439 | 601 |
440 void BackendImpl::SetNewEviction() { | 602 void BackendImplV3::SetNewEviction() { |
441 user_flags_ |= kNewEviction; | 603 user_flags_ |= EVICTION_V2; |
442 new_eviction_ = true; | 604 lru_eviction_ = false; |
443 } | 605 } |
444 | 606 |
445 void BackendImpl::SetFlags(uint32 flags) { | 607 void BackendImplV3::SetFlags(uint32 flags) { |
446 user_flags_ |= flags; | 608 user_flags_ |= flags; |
447 } | 609 } |
448 | 610 |
449 int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) { | 611 int BackendImplV3::FlushQueueForTest(const CompletionCallback& callback) { |
450 background_queue_.FlushQueue(callback); | 612 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_NONE); |
| 613 work_item->set_user_callback(callback); |
| 614 PostWorkItem(work_item); |
451 return net::ERR_IO_PENDING; | 615 return net::ERR_IO_PENDING; |
452 } | 616 } |
453 | 617 |
454 void BackendImpl::TrimForTest(bool empty) { | 618 int BackendImplV3::CleanupForTest(const CompletionCallback& callback) { |
455 eviction_.SetTestMode(); | 619 CloseDoomedEntries(); |
456 eviction_.TrimCache(empty); | 620 ReleaseRecentEntries(); |
| 621 UpdateDeletedEntries(); |
| 622 index_.OnBackupTimer(); |
| 623 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLEANUP); |
| 624 work_item->set_user_callback(callback); |
| 625 PostWorkItem(work_item); |
| 626 worker_ = NULL; |
| 627 init_ = false; |
| 628 disabled_ = true; |
| 629 index_.Reset(); |
| 630 return net::ERR_IO_PENDING; |
457 } | 631 } |
458 | 632 |
459 void BackendImpl::TrimDeletedListForTest(bool empty) { | 633 void BackendImplV3::TrimForTest(bool empty) { |
| 634 eviction_.SetTestMode(); |
| 635 if (empty) |
| 636 eviction_.TrimAllCache(CompletionCallback()); |
| 637 else |
| 638 eviction_.TrimCache(); |
| 639 } |
| 640 |
| 641 void BackendImplV3::TrimDeletedListForTest(bool empty) { |
460 eviction_.SetTestMode(); | 642 eviction_.SetTestMode(); |
461 eviction_.TrimDeletedList(empty); | 643 eviction_.TrimDeletedList(empty); |
462 } | 644 } |
463 | 645 |
464 int BackendImpl::SelfCheck() { | 646 void BackendImplV3::AddDelayForTest(int seconds) { |
 | 647 Trace("Add %d seconds", seconds); |
| 648 int old_timers = test_seconds_ / kTimerSeconds; |
| 649 test_seconds_ += seconds; |
| 650 if (old_timers != test_seconds_ / kTimerSeconds) |
| 651 OnTimerTick(); |
| 652 } |
| 653 |
| 654 int BackendImplV3::WaitForEntryToCloseForTest( |
| 655 const std::string& key, |
| 656 const CompletionCallback& callback) { |
| 657 DCHECK(!callback.is_null()); |
| 658 if (disabled_ || key.empty()) |
| 659 return net::ERR_FAILED; |
| 660 |
| 661 uint32 hash = base::Hash(key); |
| 662 |
| 663 EntrySet entries = index_.LookupEntry(hash); |
| 664 if (!entries.cells.size()) |
| 665 return net::OK; |
| 666 |
| 667 if (entries.cells.size() == static_cast<size_t>(entries.evicted_count)) |
| 668 return net::OK; |
| 669 |
| 670 EntryImplV3* open_entry = LookupOpenEntry(entries, key); |
| 671 if (open_entry) { |
| 672 open_entry->NotifyDestructionForTest(callback); |
| 673 open_entry->Close(); |
| 674 return net::ERR_IO_PENDING; |
| 675 } |
| 676 |
| 677 return net::OK; |
| 678 } |
| 679 |
| 680 int BackendImplV3::SelfCheck() { |
465 if (!init_) { | 681 if (!init_) { |
466 LOG(ERROR) << "Init failed"; | 682 LOG(ERROR) << "Init failed"; |
467 return ERR_INIT_FAILED; | 683 return ERR_INIT_FAILED; |
468 } | 684 } |
469 | 685 |
470 int num_entries = rankings_.SelfCheck(); | 686 /*int num_entries = rankings_.SelfCheck(); |
471 if (num_entries < 0) { | 687 if (num_entries < 0) { |
472 LOG(ERROR) << "Invalid rankings list, error " << num_entries; | 688 LOG(ERROR) << "Invalid rankings list, error " << num_entries; |
473 #if !defined(NET_BUILD_STRESS_CACHE) | 689 #if !defined(NET_BUILD_STRESS_CACHE) |
474 return num_entries; | 690 return num_entries; |
475 #endif | 691 #endif |
476 } | 692 } |
477 | 693 |
478 if (num_entries != data_->header.num_entries) { | 694 if (num_entries != index_.header()->num_entries) { |
479 LOG(ERROR) << "Number of entries mismatch"; | 695 LOG(ERROR) << "Number of entries mismatch"; |
480 #if !defined(NET_BUILD_STRESS_CACHE) | 696 #if !defined(NET_BUILD_STRESS_CACHE) |
481 return ERR_NUM_ENTRIES_MISMATCH; | 697 return ERR_NUM_ENTRIES_MISMATCH; |
482 #endif | 698 #endif |
483 } | 699 }*/ |
484 | 700 |
485 return CheckAllEntries(); | 701 return CheckAllEntries(); |
486 } | 702 } |
487 | 703 |
| 704 void BackendImplV3::GrowIndex() { |
| 705 if (growing_index_ || disabled_) |
| 706 return; |
| 707 growing_index_ = true; |
| 708 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_GROW_INDEX); |
| 709 PostWorkItem(work_item); |
| 710 } |
| 711 |
| 712 void BackendImplV3::SaveIndex(net::IOBuffer* buffer, int buffer_len) { |
| 713 if (disabled_ || !buffer_len) |
| 714 return; |
| 715 |
| 716 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_WRITE_INDEX); |
| 717 work_item->set_buffer(buffer); |
| 718 work_item->set_buffer_len(buffer_len); |
| 719 work_item->set_offset(0); |
| 720 PostWorkItem(work_item); |
| 721 } |
| 722 |
| 723 void BackendImplV3::DeleteCell(EntryCell cell) { |
| 724 NOTREACHED(); |
| 725 // Post task to delete this cell. |
 | 726 // Look at a local map of cells being deleted. |
| 727 } |
| 728 |
| 729 void BackendImplV3::FixCell(EntryCell cell) { |
| 730 NOTREACHED(); |
| 731 } |
| 732 |
488 // ------------------------------------------------------------------------ | 733 // ------------------------------------------------------------------------ |
489 | 734 |
490 net::CacheType BackendImpl::GetCacheType() const { | 735 net::CacheType BackendImplV3::GetCacheType() const { |
491 return cache_type_; | 736 return cache_type_; |
492 } | 737 } |
493 | 738 |
494 int32 BackendImpl::GetEntryCount() const { | 739 int32 BackendImplV3::GetEntryCount() const { |
495 if (!index_.get() || disabled_) | 740 if (disabled_) |
496 return 0; | 741 return 0; |
497 // num_entries includes entries already evicted. | 742 DCHECK(init_); |
498 int32 not_deleted = data_->header.num_entries - | 743 return index_.header()->num_entries; |
499 data_->header.lru.sizes[Rankings::DELETED]; | 744 } |
500 | 745 |
501 if (not_deleted < 0) { | 746 int BackendImplV3::OpenEntry(const std::string& key, Entry** entry, |
502 NOTREACHED(); | 747 const CompletionCallback& callback) { |
503 not_deleted = 0; | 748 DCHECK(!callback.is_null()); |
504 } | 749 if (disabled_ || key.empty()) |
505 | 750 return net::ERR_FAILED; |
506 return not_deleted; | 751 |
507 } | |
508 | |
509 EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) { | |
510 if (disabled_) | |
511 return NULL; | |
512 | |
513 TimeTicks start = TimeTicks::Now(); | |
514 uint32 hash = base::Hash(key); | 752 uint32 hash = base::Hash(key); |
515 Trace("Open hash 0x%x", hash); | 753 Trace("Open hash 0x%x", hash); |
516 | 754 |
517 bool error; | 755 EntrySet entries = index_.LookupEntry(hash); |
518 EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error); | 756 if (!entries.cells.size()) |
519 if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) { | 757 return net::ERR_FAILED; |
520 // The entry was already evicted. | 758 |
521 cache_entry->Release(); | 759 if (entries.cells.size() == static_cast<size_t>(entries.evicted_count)) |
522 cache_entry = NULL; | 760 return net::ERR_FAILED; |
523 } | 761 |
524 | 762 EntryImplV3* open_entry = LookupOpenEntry(entries, key); |
525 int current_size = data_->header.num_bytes / (1024 * 1024); | 763 if (open_entry) { |
526 int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120; | 764 *entry = open_entry; |
527 int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120; | 765 eviction_.OnOpenEntry(open_entry); |
528 int64 use_hours = total_hours - no_use_hours; | 766 entry_count_++; |
529 | 767 |
530 if (!cache_entry) { | 768 Trace("Open hash 0x%x end: 0x%x", hash, open_entry->GetAddress().value()); |
531 CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start); | 769 stats_.OnEvent(Stats::OPEN_HIT); |
532 CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size); | 770 SIMPLE_STATS_COUNTER("disk_cache.hit"); |
533 CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours); | 771 return net::OK; |
534 CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours); | 772 } |
535 stats_.OnEvent(Stats::OPEN_MISS); | 773 |
536 return NULL; | 774 // Read the entry from disk. |
537 } | 775 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY); |
538 | 776 work_item->set_entries(entries); |
539 eviction_.OnOpenEntry(cache_entry); | 777 work_item->set_user_callback(callback); |
540 entry_count_++; | 778 work_item->set_key(key); |
541 | 779 work_item->set_entry_buffer(entry); |
542 Trace("Open hash 0x%x end: 0x%x", hash, | 780 PostWorkItem(work_item); |
543 cache_entry->entry()->address().value()); | 781 |
544 CACHE_UMA(AGE_MS, "OpenTime", 0, start); | 782 return net::ERR_IO_PENDING; |
545 CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size); | 783 } |
546 CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours); | 784 |
547 CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours); | 785 int BackendImplV3::CreateEntry(const std::string& key, Entry** entry, |
548 stats_.OnEvent(Stats::OPEN_HIT); | 786 const CompletionCallback& callback) { |
549 SIMPLE_STATS_COUNTER("disk_cache.hit"); | 787 DCHECK(init_); |
550 return cache_entry; | 788 DCHECK(!callback.is_null()); |
551 } | 789 if (disabled_ || key.empty() || key.size() > kMaxKeySize) |
552 | 790 return net::ERR_FAILED; |
553 EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) { | 791 |
| 792 uint32 hash = base::Hash(key); |
| 793 Trace("Create hash 0x%x", hash); |
| 794 |
| 795 EntrySet entries = index_.LookupEntry(hash); |
| 796 if (entries.cells.size()) { |
| 797 if (entries.cells.size() != static_cast<size_t>(entries.evicted_count)) { |
 | 798 // But we may have a hash collision :(. So create a work item to check it
here. |
 | 799 // Keep a collision-specific map. |
| 800 return net::ERR_FAILED; |
| 801 } |
| 802 |
| 803 // On the other hand, we have only deleted items that we may resurrect. |
| 804 // Read the entry from disk. |
| 805 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY); |
| 806 work_item->set_flags(WorkItem::WORK_FOR_RESURRECT); |
| 807 work_item->set_entries(entries); |
| 808 work_item->set_user_callback(callback); |
| 809 work_item->set_key(key); |
| 810 work_item->set_entry_buffer(entry); |
| 811 PostWorkItem(work_item); |
| 812 |
| 813 return net::ERR_IO_PENDING; |
| 814 } |
| 815 return OnCreateEntryComplete(key, hash, NULL, entry, callback); |
| 816 } |
| 817 |
| 818 int BackendImplV3::DoomEntry(const std::string& key, |
| 819 const CompletionCallback& callback) { |
| 820 DCHECK(!callback.is_null()); |
554 if (disabled_ || key.empty()) | 821 if (disabled_ || key.empty()) |
555 return NULL; | 822 return net::ERR_FAILED; |
556 | 823 |
557 TimeTicks start = TimeTicks::Now(); | 824 uint32 hash = base::Hash(key); |
558 Trace("Create hash 0x%x", hash); | 825 Trace("DoomEntry hash 0x%x", hash); |
559 | 826 |
560 scoped_refptr<EntryImpl> parent; | 827 EntrySet entries = index_.LookupEntry(hash); |
561 Addr entry_address(data_->table[hash & mask_]); | 828 if (!entries.cells.size()) |
562 if (entry_address.is_initialized()) { | 829 return net::ERR_FAILED; |
563 // We have an entry already. It could be the one we are looking for, or just | 830 |
564 // a hash conflict. | 831 if (entries.cells.size() == static_cast<size_t>(entries.evicted_count)) |
565 bool error; | 832 return net::ERR_FAILED; |
566 EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error); | 833 |
567 if (old_entry) | 834 EntryImplV3* open_entry = LookupOpenEntry(entries, key); |
568 return ResurrectEntry(old_entry); | 835 if (open_entry) { |
569 | 836 open_entry->Doom(); |
570 EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error); | 837 open_entry->Close(); |
571 DCHECK(!error); | 838 return net::OK; |
572 if (parent_entry) { | 839 } |
573 parent.swap(&parent_entry); | 840 |
574 } else if (data_->table[hash & mask_]) { | 841 // Read the entry from disk. |
575 // We should have corrected the problem. | 842 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY); |
576 NOTREACHED(); | 843 work_item->set_flags(WorkItem::WORK_FOR_DOOM); |
577 return NULL; | 844 work_item->set_entries(entries); |
578 } | 845 work_item->set_user_callback(callback); |
579 } | 846 work_item->set_key(key); |
580 | 847 PostWorkItem(work_item); |
581 // The general flow is to allocate disk space and initialize the entry data, | 848 |
582 // followed by saving that to disk, then linking the entry though the index | 849 return net::ERR_IO_PENDING; |
583 // and finally through the lists. If there is a crash in this process, we may | 850 } |
584 // end up with: | 851 |
585 // a. Used, unreferenced empty blocks on disk (basically just garbage). | 852 int BackendImplV3::DoomAllEntries(const CompletionCallback& callback) { |
586 // b. Used, unreferenced but meaningful data on disk (more garbage). | |
587 // c. A fully formed entry, reachable only through the index. | |
588 // d. A fully formed entry, also reachable through the lists, but still dirty. | |
589 // | |
590 // Anything after (b) can be automatically cleaned up. We may consider saving | |
591 // the current operation (as we do while manipulating the lists) so that we | |
592 // can detect and cleanup (a) and (b). | |
593 | |
594 int num_blocks = EntryImpl::NumBlocksForEntry(key.size()); | |
595 if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) { | |
596 LOG(ERROR) << "Create entry failed " << key.c_str(); | |
597 stats_.OnEvent(Stats::CREATE_ERROR); | |
598 return NULL; | |
599 } | |
600 | |
601 Addr node_address(0); | |
602 if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) { | |
603 block_files_.DeleteBlock(entry_address, false); | |
604 LOG(ERROR) << "Create entry failed " << key.c_str(); | |
605 stats_.OnEvent(Stats::CREATE_ERROR); | |
606 return NULL; | |
607 } | |
608 | |
609 scoped_refptr<EntryImpl> cache_entry( | |
610 new EntryImpl(this, entry_address, false)); | |
611 IncreaseNumRefs(); | |
612 | |
613 if (!cache_entry->CreateEntry(node_address, key, hash)) { | |
614 block_files_.DeleteBlock(entry_address, false); | |
615 block_files_.DeleteBlock(node_address, false); | |
616 LOG(ERROR) << "Create entry failed " << key.c_str(); | |
617 stats_.OnEvent(Stats::CREATE_ERROR); | |
618 return NULL; | |
619 } | |
620 | |
621 cache_entry->BeginLogging(net_log_, true); | |
622 | |
623 // We are not failing the operation; let's add this to the map. | |
624 open_entries_[entry_address.value()] = cache_entry.get(); | |
625 | |
626 // Save the entry. | |
627 cache_entry->entry()->Store(); | |
628 cache_entry->rankings()->Store(); | |
629 IncreaseNumEntries(); | |
630 entry_count_++; | |
631 | |
632 // Link this entry through the index. | |
633 if (parent.get()) { | |
634 parent->SetNextAddress(entry_address); | |
635 } else { | |
636 data_->table[hash & mask_] = entry_address.value(); | |
637 } | |
638 | |
639 // Link this entry through the lists. | |
640 eviction_.OnCreateEntry(cache_entry.get()); | |
641 | |
642 CACHE_UMA(AGE_MS, "CreateTime", 0, start); | |
643 stats_.OnEvent(Stats::CREATE_HIT); | |
644 SIMPLE_STATS_COUNTER("disk_cache.miss"); | |
645 Trace("create entry hit "); | |
646 FlushIndex(); | |
647 cache_entry->AddRef(); | |
648 return cache_entry.get(); | |
649 } | |
650 | |
651 int BackendImpl::SyncDoomEntry(const std::string& key) { | |
652 if (disabled_) | 853 if (disabled_) |
653 return net::ERR_FAILED; | 854 return net::ERR_FAILED; |
654 | 855 |
655 EntryImpl* entry = OpenEntryImpl(key); | |
656 if (!entry) | |
657 return net::ERR_FAILED; | |
658 | |
659 entry->DoomImpl(); | |
660 entry->Release(); | |
661 return net::OK; | |
662 } | |
663 | |
664 int BackendImpl::SyncDoomAllEntries() { | |
665 // This is not really an error, but it is an interesting condition. | 856 // This is not really an error, but it is an interesting condition. |
666 ReportError(ERR_CACHE_DOOMED); | 857 ReportError(ERR_CACHE_DOOMED); |
667 stats_.OnEvent(Stats::DOOM_CACHE); | 858 stats_.OnEvent(Stats::DOOM_CACHE); |
668 if (!num_refs_) { | 859 if (!num_refs_) { |
669 RestartCache(false); | 860 RestartCache(callback); |
670 return disabled_ ? net::ERR_FAILED : net::OK; | 861 return init_ ? net::OK : net::ERR_IO_PENDING; |
671 } else { | 862 } |
672 if (disabled_) | 863 return eviction_.TrimAllCache(callback); |
673 return net::ERR_FAILED; | 864 } |
674 | 865 |
675 eviction_.TrimCache(true); | 866 int BackendImplV3::DoomEntriesBetween(base::Time initial_time, |
| 867 base::Time end_time, |
| 868 const CompletionCallback& callback) { |
| 869 DCHECK_NE(net::APP_CACHE, cache_type_); |
| 870 Time now = GetCurrentTime(); |
| 871 if (end_time.is_null() || end_time > now) |
| 872 end_time = now; |
| 873 |
| 874 DCHECK(end_time >= initial_time); |
| 875 |
| 876 if (disabled_) |
| 877 return net::ERR_FAILED; |
| 878 |
| 879 scoped_ptr<IndexIterator> to_delete(new IndexIterator); |
| 880 to_delete->forward = false; |
| 881 to_delete->timestamp = index_.CalculateTimestamp(end_time) + 1; |
| 882 |
| 883 // Prepare to read the first entry from disk. |
| 884 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY); |
| 885 work_item->set_flags(WorkItem::WORK_FOR_ITERATION | |
| 886 WorkItem::WORK_FOR_DOOM_RANGE); |
| 887 work_item->set_initial_time(initial_time); |
| 888 work_item->set_end_time(end_time); |
| 889 work_item->set_iterator(to_delete.Pass()); |
| 890 |
| 891 if (OpenNext(work_item) != net::ERR_IO_PENDING) |
676 return net::OK; | 892 return net::OK; |
677 } | 893 |
678 } | 894 work_item->set_user_callback(callback); |
679 | 895 return net::ERR_IO_PENDING; |
680 int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time, | 896 } |
681 const base::Time end_time) { | 897 |
| 898 int BackendImplV3::DoomEntriesSince(base::Time initial_time, |
| 899 const CompletionCallback& callback) { |
682 DCHECK_NE(net::APP_CACHE, cache_type_); | 900 DCHECK_NE(net::APP_CACHE, cache_type_); |
683 if (end_time.is_null()) | 901 return DoomEntriesBetween(initial_time, GetCurrentTime(), callback); |
684 return SyncDoomEntriesSince(initial_time); | 902 } |
685 | 903 |
686 DCHECK(end_time >= initial_time); | 904 int BackendImplV3::OpenNextEntry(void** iter, Entry** next_entry, |
687 | 905 const CompletionCallback& callback) { |
| 906 DCHECK(!callback.is_null()); |
| 907 return OpenFollowingEntry(false, iter, next_entry, callback); |
| 908 } |
| 909 |
| 910 void BackendImplV3::EndEnumeration(void** iter) { |
| 911 scoped_ptr<IndexIterator> iterator( |
| 912 reinterpret_cast<IndexIterator*>(*iter)); |
| 913 *iter = NULL; |
| 914 } |
| 915 |
| 916 void BackendImplV3::GetStats(StatsItems* stats) { |
688 if (disabled_) | 917 if (disabled_) |
689 return net::ERR_FAILED; | |
690 | |
691 EntryImpl* node; | |
692 void* iter = NULL; | |
693 EntryImpl* next = OpenNextEntryImpl(&iter); | |
694 if (!next) | |
695 return net::OK; | |
696 | |
697 while (next) { | |
698 node = next; | |
699 next = OpenNextEntryImpl(&iter); | |
700 | |
701 if (node->GetLastUsed() >= initial_time && | |
702 node->GetLastUsed() < end_time) { | |
703 node->DoomImpl(); | |
704 } else if (node->GetLastUsed() < initial_time) { | |
705 if (next) | |
706 next->Release(); | |
707 next = NULL; | |
708 SyncEndEnumeration(iter); | |
709 } | |
710 | |
711 node->Release(); | |
712 } | |
713 | |
714 return net::OK; | |
715 } | |
716 | |
717 // We use OpenNextEntryImpl to retrieve elements from the cache, until we get | |
718 // entries that are too old. | |
719 int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) { | |
720 DCHECK_NE(net::APP_CACHE, cache_type_); | |
721 if (disabled_) | |
722 return net::ERR_FAILED; | |
723 | |
724 stats_.OnEvent(Stats::DOOM_RECENT); | |
725 for (;;) { | |
726 void* iter = NULL; | |
727 EntryImpl* entry = OpenNextEntryImpl(&iter); | |
728 if (!entry) | |
729 return net::OK; | |
730 | |
731 if (initial_time > entry->GetLastUsed()) { | |
732 entry->Release(); | |
733 SyncEndEnumeration(iter); | |
734 return net::OK; | |
735 } | |
736 | |
737 entry->DoomImpl(); | |
738 entry->Release(); | |
739 SyncEndEnumeration(iter); // Dooming the entry invalidates the iterator. | |
740 } | |
741 } | |
742 | |
743 int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry, | |
744 const CompletionCallback& callback) { | |
745 DCHECK(!callback.is_null()); | |
746 background_queue_.OpenNextEntry(iter, next_entry, callback); | |
747 return net::ERR_IO_PENDING; | |
748 } | |
749 | |
750 void BackendImpl::EndEnumeration(void** iter) { | |
751 background_queue_.EndEnumeration(*iter); | |
752 *iter = NULL; | |
753 } | |
754 | |
755 void BackendImpl::GetStats(StatsItems* stats) { | |
756 if (disabled_) | |
757 return; | 918 return; |
758 | 919 |
759 std::pair<std::string, std::string> item; | 920 std::pair<std::string, std::string> item; |
760 | 921 |
761 item.first = "Entries"; | 922 item.first = "Entries"; |
762 item.second = base::StringPrintf("%d", data_->header.num_entries); | 923 item.second = base::StringPrintf("%d", index_.header()->num_entries); |
763 stats->push_back(item); | |
764 | |
765 item.first = "Pending IO"; | |
766 item.second = base::StringPrintf("%d", num_pending_io_); | |
767 stats->push_back(item); | 924 stats->push_back(item); |
768 | 925 |
769 item.first = "Max size"; | 926 item.first = "Max size"; |
770 item.second = base::StringPrintf("%d", max_size_); | 927 item.second = base::StringPrintf("%d", max_size_); |
771 stats->push_back(item); | 928 stats->push_back(item); |
772 | 929 |
773 item.first = "Current size"; | 930 item.first = "Current size"; |
774 item.second = base::StringPrintf("%d", data_->header.num_bytes); | 931 item.second = base::StringPrintf("%d", index_.header()->num_bytes); |
775 stats->push_back(item); | 932 stats->push_back(item); |
776 | 933 |
777 item.first = "Cache type"; | |
778 item.second = "Blockfile Cache"; | |
779 stats->push_back(item); | |
780 | |
781 stats_.GetItems(stats); | 934 stats_.GetItems(stats); |
782 } | 935 } |
783 | 936 |
784 void BackendImpl::SyncOnExternalCacheHit(const std::string& key) { | 937 void BackendImplV3::OnExternalCacheHit(const std::string& key) { |
785 if (disabled_) | 938 if (disabled_ || key.empty()) |
786 return; | 939 return; |
787 | 940 |
788 uint32 hash = base::Hash(key); | 941 uint32 hash = base::Hash(key); |
789 bool error; | 942 EntrySet entries = index_.LookupEntry(hash); |
790 EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error); | 943 if (!entries.cells.size()) |
791 if (cache_entry) { | 944 return; |
792 if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) { | 945 |
793 UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE); | 946 if (entries.cells.size() == static_cast<size_t>(entries.evicted_count)) |
| 947 return; |
| 948 |
| 949 for (size_t i = 0; i < entries.cells.size(); i++) { |
| 950 if (entries.cells[i].GetGroup() == ENTRY_EVICTED) |
| 951 continue; |
| 952 |
| 953 index_.UpdateTime(hash, entries.cells[i].GetAddress(), GetCurrentTime()); |
| 954 } |
| 955 |
| 956 EntryImplV3* open_entry = LookupOpenEntry(entries, key); |
| 957 if (open_entry) { |
| 958 eviction_.OnOpenEntry(open_entry); |
| 959 entry_count_++; |
| 960 UpdateRank(open_entry, true); |
| 961 open_entry->Close(); |
| 962 return; |
| 963 } |
| 964 |
| 965 if (user_flags_ & UNIT_TEST_MODE) { |
| 966 for (size_t i = 0; i < entries.cells.size(); i++) { |
| 967 // This method doesn't have a callback, and it may take a while for the |
| 968 // operation to complete so update the time of any entry with this hash. |
| 969 if (entries.cells[i].GetGroup() != ENTRY_EVICTED) { |
| 970 index_.UpdateTime(hash, entries.cells[i].GetAddress(), |
| 971 GetCurrentTime()); |
| 972 } |
794 } | 973 } |
795 cache_entry->Release(); | 974 } |
796 } | 975 |
| 976 // Read the entry from disk. |
| 977 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY); |
| 978 work_item->set_flags(WorkItem::WORK_FOR_UPDATE); |
| 979 work_item->set_entries(entries); |
| 980 work_item->set_key(key); |
| 981 PostWorkItem(work_item); |
797 } | 982 } |
798 | 983 |
799 // ------------------------------------------------------------------------ | 984 // ------------------------------------------------------------------------ |
800 | 985 |
801 // The maximum cache size will be either set explicitly by the caller, or | 986 // The maximum cache size will be either set explicitly by the caller, or |
802 // calculated by this code. | 987 // calculated by this code. |
803 void BackendImpl::AdjustMaxCacheSize(int table_len) { | 988 void BackendImplV3::AdjustMaxCacheSize() { |
804 if (max_size_) | 989 if (max_size_) |
805 return; | 990 return; |
806 | 991 |
807 // If table_len is provided, the index file exists. | |
808 DCHECK(!table_len || data_->header.magic); | |
809 | |
810 // The user is not setting the size, let's figure it out. | 992 // The user is not setting the size, let's figure it out. |
811 int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_); | 993 int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_); |
812 if (available < 0) { | 994 if (available < 0) { |
813 max_size_ = kDefaultCacheSize; | 995 max_size_ = kDefaultCacheSize; |
814 return; | 996 return; |
815 } | 997 } |
816 | 998 |
817 if (table_len) | 999 available += index_.header()->num_bytes; |
818 available += data_->header.num_bytes; | |
819 | 1000 |
820 max_size_ = PreferedCacheSize(available); | 1001 max_size_ = PreferedCacheSize(available); |
821 | 1002 |
822 // Let's not use more than the default size while we tune-up the performance | 1003 // Let's not use more than the default size while we tune-up the performance |
823 // of bigger caches. TODO(rvargas): remove this limit. | 1004 // of bigger caches. TODO(rvargas): remove this limit. |
824 if (max_size_ > kDefaultCacheSize * 4) | 1005 if (max_size_ > kDefaultCacheSize * 4) |
825 max_size_ = kDefaultCacheSize * 4; | 1006 max_size_ = kDefaultCacheSize * 4; |
826 | |
827 if (!table_len) | |
828 return; | |
829 | |
830 // If we already have a table, adjust the size to it. | |
831 int current_max_size = MaxStorageSizeForTable(table_len); | |
832 if (max_size_ > current_max_size) | |
833 max_size_= current_max_size; | |
834 } | 1007 } |
835 | 1008 |
836 bool BackendImpl::InitStats() { | 1009 bool BackendImplV3::InitStats(void* stats_data) { |
837 Addr address(data_->header.stats); | 1010 Addr address(index_.header()->stats); |
838 int size = stats_.StorageSize(); | 1011 int size = stats_.StorageSize(); |
839 | 1012 |
840 if (!address.is_initialized()) { | 1013 if (!address.is_initialized()) { |
841 FileType file_type = Addr::RequiredFileType(size); | 1014 FileType file_type = Addr::RequiredFileType(size); |
842 DCHECK_NE(file_type, EXTERNAL); | 1015 DCHECK_NE(file_type, EXTERNAL); |
843 int num_blocks = Addr::RequiredBlocks(size, file_type); | 1016 int num_blocks = Addr::RequiredBlocks(size, file_type); |
844 | 1017 |
845 if (!CreateBlock(file_type, num_blocks, &address)) | 1018 if (!CreateBlock(file_type, num_blocks, &address)) |
846 return false; | 1019 return false; |
847 return stats_.Init(NULL, 0, address); | 1020 return stats_.Init(NULL, 0, address); |
848 } | 1021 } |
849 | 1022 |
850 if (!address.is_block_file()) { | 1023 // Load the required data. |
851 NOTREACHED(); | 1024 DCHECK(address.is_block_file()); |
852 return false; | 1025 size = address.num_blocks() * address.BlockSize(); |
853 } | |
854 | 1026 |
855 // Load the required data. | 1027 if (!stats_.Init(stats_data, size, address)) |
856 size = address.num_blocks() * address.BlockSize(); | |
857 MappedFile* file = File(address); | |
858 if (!file) | |
859 return false; | |
860 | |
861 scoped_ptr<char[]> data(new char[size]); | |
862 size_t offset = address.start_block() * address.BlockSize() + | |
863 kBlockHeaderSize; | |
864 if (!file->Read(data.get(), size, offset)) | |
865 return false; | |
866 | |
867 if (!stats_.Init(data.get(), size, address)) | |
868 return false; | 1028 return false; |
869 if (cache_type_ == net::DISK_CACHE && ShouldReportAgain()) | 1029 if (cache_type_ == net::DISK_CACHE && ShouldReportAgain()) |
870 stats_.InitSizeHistogram(); | 1030 stats_.InitSizeHistogram(); |
871 return true; | 1031 return true; |
872 } | 1032 } |
873 | 1033 |
void BackendImplV3::StoreStats() {
  // Serializes the in-memory stats into a ref-counted IO buffer and posts an
  // asynchronous write of that block to the cache thread. Fire-and-forget:
  // the result of the write is not checked.
  int size = stats_.StorageSize();
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
  Addr address;
  // SerializeStats returns the number of bytes actually used and the block
  // address where the stats live on disk.
  size = stats_.SerializeStats(buffer->data(), size, &address);
  DCHECK(size);
  if (!address.is_initialized())
    return;

  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_WRITE_DATA);
  work_item->set_buffer(buffer);
  work_item->set_buffer_len(size);
  work_item->set_address(address);
  work_item->set_offset(0);
  PostWorkItem(work_item);
}
891 | 1050 |
void BackendImplV3::RestartCache(const CompletionCallback& callback) {
  // Tears down the in-memory state and posts a WORK_RESTART item so the cache
  // thread re-initializes the backend. |callback| is invoked when the restart
  // completes (see OnInitComplete).
  PrepareForRestart();

  // Don't call Init() if directed by the unit test: we are simulating a failure
  // trying to re-enable the cache.
  if (user_flags_ & UNIT_TEST_MODE) {
    init_ = true;  // Let the destructor do proper cleanup.
  } else {
    scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_RESTART);
    work_item->set_user_callback(callback);
    work_item->set_flags(user_flags_);
    PostWorkItem(work_item);
  }
}
918 | 1065 |
void BackendImplV3::PrepareForRestart() {
  // Resets the eviction mode unless the user explicitly asked for v2
  // eviction, then drops all volatile state so Init() can run again.
  if (!(user_flags_ & EVICTION_V2))
    lru_eviction_ = true;

  disabled_ = true;
  // Clear the crash marker: this shutdown path is controlled, not a crash.
  index_.header()->crash = 0;
  block_files_.Clear();
  index_.Reset();
  init_ = false;
  restarted_ = true;
}
937 | 1077 |
void BackendImplV3::CleanupCache() {
  // Final teardown of the backend: flushes bookkeeping to the index, posts a
  // WORK_CLEANUP item to the cache thread, and invalidates weak pointers so
  // no further callbacks reach this object.
  Trace("Backend Cleanup");
  //eviction_.Stop();
  timer_.reset();

  if (init_) {
    if (!(user_flags_ & NO_CLEAN_ON_EXIT)) {
      // Persist stats and settle all entry-level bookkeeping before the
      // final index backup.
      StoreStats();
      CloseDoomedEntries();
      ReleaseRecentEntries();
      UpdateDeletedEntries();
      index_.OnBackupTimer();
    }
    scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLEANUP);
    PostWorkItem(work_item);
    // Dropping |worker_| prevents any further work items from being posted.
    worker_ = NULL;
  }
  ptr_factory_.InvalidateWeakPtrs();
}
962 | 1097 |
// Materializes the entry referenced by the work item's current cell as an
// in-memory EntryImplV3. Returns 0 on success (with *entry holding an
// AddRef'd entry), or an ERR_* code when the cell is stale or the on-disk
// data fails validation.
int BackendImplV3::NewEntry(WorkItem* work_item, EntryImplV3** entry) {
  Addr address =
      work_item->entries()->cells[work_item->entries()->current].GetAddress();

  // The entry could have been opened since this task was posted to the cache
  // thread, so let's check again.
  EntryImplV3* this_entry = GetOpenEntry(address);
  if (this_entry) {
    // Easy job. This entry is already in memory.
    *entry = this_entry;
    return 0;
  }

  // Even if the entry is not in memory right now, it could have changed. Note
  // that any state other than USED means we are either deleting this entry or
  // it should be in memory.
  uint32 hash =
      work_item->entries()->cells[work_item->entries()->current].hash();
  EntryCell cell = index_.FindEntryCell(hash, address);
  if (!cell.IsValid() || cell.GetState() != ENTRY_USED)
    return ERR_INVALID_ENTRY;

  STRESS_DCHECK(block_files_.IsValid(address));

  if (!address.SanityCheckForEntryV3()) {
    LOG(WARNING) << "Wrong entry address.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ADDRESS;
  }

  // Evicted entries only carry the short record that was read from disk;
  // regular entries carry the full record.
  scoped_refptr<EntryImplV3> cache_entry;
  if (address.file_type() == BLOCK_EVICTED) {
    cache_entry = new EntryImplV3(this, address, work_item->key(),
                                  work_item->short_entry_record().Pass());
  } else {
    cache_entry = new EntryImplV3(this, address, work_item->key(),
                                  work_item->entry_record().Pass());
  }
  IncreaseNumRefs();
  *entry = NULL;

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ENTRY;
  }

  STRESS_DCHECK(block_files_.IsValid(
      Addr(cache_entry->entry()->Data()->rankings_node)));

  if (!cache_entry->DataSanityCheck()) {
    // TODO: should the two sanity-check failures share one path? Make sure
    // the cell is deleted in the first case, and as much data as possible is
    // cleaned up here.
    LOG(WARNING) << "Messed up entry found.";
    cache_entry->FixForDelete();
  }

  // Track the entry as open and mirror that state in the index.
  open_entries_[address.value()] = cache_entry;
  index_.SetSate(cache_entry->GetHash(), address, ENTRY_OPEN);

  cache_entry->BeginLogging(net_log_, false);
  cache_entry->OnOpenEntry();
  cache_entry.swap(entry);
  return 0;
}
1039 | 1162 |
| 1163 EntryImplV3* BackendImplV3::LookupOpenEntry(const EntrySet& entries, |
| 1164 const std::string key) { |
| 1165 for (size_t i = 0; i < entries.cells.size(); i++) { |
| 1166 if (entries.cells[i].GetGroup() == ENTRY_EVICTED) |
| 1167 continue; |
| 1168 |
| 1169 EntryImplV3* this_entry = GetOpenEntry(entries.cells[i].GetAddress()); |
| 1170 if (this_entry && this_entry->GetKey() == key) |
| 1171 return this_entry; |
| 1172 } |
| 1173 return NULL; |
| 1174 } |
| 1175 |
// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
// |*iter| is an opaque IndexIterator owned by the caller across calls; a new
// one is created on the first call. Returns net::OK / net::ERR_FAILED
// synchronously, or net::ERR_IO_PENDING when the entry must be read from
// disk, in which case |callback| fires later.
int BackendImplV3::OpenFollowingEntry(bool forward, void** iter,
                                      Entry** next_entry,
                                      const CompletionCallback& callback) {
  if (disabled_)
    return net::ERR_FAILED;

  DCHECK(iter);

  // Take ownership of the caller's iterator for the duration of this call.
  scoped_ptr<IndexIterator> iterator(
      reinterpret_cast<IndexIterator*>(*iter));
  *iter = NULL;

  if (!iterator.get()) {
    // First call: start one timestamp past "now" so the newest entries are
    // included in the enumeration.
    iterator.reset(new IndexIterator);
    iterator->timestamp = index_.CalculateTimestamp(GetCurrentTime()) + 1;
    iterator->forward = forward;
  }

  // Prepare to read the first entry from disk.
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
  work_item->set_flags(WorkItem::WORK_FOR_ITERATION);
  work_item->set_iterator(iterator.Pass());
  work_item->set_iter_buffer(iter);
  work_item->set_entry_buffer(next_entry);

  int rv = OpenNext(work_item);
  if (rv == net::ERR_IO_PENDING)
    work_item->set_user_callback(callback);

  return rv;
}
| 1208 |
// Refills the iteration work item's cell list from the index. For a ranged
// doom, stops once the iterator's timestamp crosses below the requested
// initial time. Returns false when there is nothing left to enumerate.
bool BackendImplV3::GetMoreCells(WorkItem* work_item) {
  DCHECK(work_item->flags() & WorkItem::WORK_FOR_ITERATION);
  IndexIterator* iterator = work_item->iterator();

  if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) {
    // Don't walk past the start of the doom window.
    int lower_limit = index_.CalculateTimestamp(work_item->initial_time());
    if (iterator->timestamp <= lower_limit ||
        !index_.GetNextCells(iterator)) {
      return false;
    }
    return true;
  }

  return index_.GetNextCells(iterator);
}
| 1224 |
// Advances the iteration one entry: consumes cells from the iterator until an
// open entry is found (returned synchronously) or a cell must be read from
// disk (posts the work item and returns ERR_IO_PENDING). Returns ERR_FAILED
// when the enumeration is exhausted.
int BackendImplV3::OpenNext(WorkItem* work_item) {
  Trace("OpenNext work item 0x%p", work_item);
  CellList* cells = &work_item->iterator()->cells;
  EntrySet entries;
  for (;;) {
    if (cells->empty()) {
      if (!GetMoreCells(work_item)) {
        // Nothing left: report end-of-enumeration to the caller.
        UpdateIterator(NULL, work_item);
        return net::ERR_FAILED;
      }
      DCHECK(!cells->empty());
    }

    while (!cells->empty()) {
      // Re-validate the cell against the live index before using it.
      EntryCell last_cell = index_.FindEntryCell(cells->back().hash,
                                                 cells->back().address);
      cells->pop_back();
      if (!last_cell.IsValid())
        continue;

      entries.cells.push_back(last_cell);

      // See if the entry is currently open.
      EntryImplV3* this_entry = GetOpenEntry(last_cell.GetAddress());
      if (this_entry) {
        if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) {
          Doom(this_entry, work_item);
          continue;
        } else {
          UpdateIterator(this_entry, work_item);
          return net::OK;
        }
      }

      // Not in memory: hand the candidate cells to the cache thread so the
      // entry record is read from disk (completion in OnOpenEntryComplete).
      work_item->set_entries(entries);
      PostWorkItem(work_item);
      return net::ERR_IO_PENDING;
    }
  }
}
| 1265 |
| 1266 void BackendImplV3::Doom(EntryImplV3* entry, WorkItem* work_item) { |
| 1267 if (entry->GetLastUsed() >= work_item->initial_time() && |
| 1268 entry->GetLastUsed() < work_item->end_time()) { |
| 1269 Trace("Doom 0x%p work item 0x%p", entry, work_item); |
| 1270 entry->Doom(); |
| 1271 } |
| 1272 entry->Close(); |
| 1273 } |
| 1274 |
// Finalizes one iteration step: hands |entry| (may be NULL on failure/end)
// and the iterator back to the caller's buffers, and runs the user callback
// if one was set (the asynchronous path).
void BackendImplV3::UpdateIterator(EntryImplV3* entry, WorkItem* work_item) {
  int result;
  if (entry) {
    result = net::OK;
    // Transfer iterator ownership back to the caller for the next call.
    *work_item->iter_buffer() = work_item->ReleaseIterator();
    *work_item->entry_buffer() = entry;
  } else if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) {
    // A ranged doom has no entry to return; completion is success.
    result = net::OK;
  } else {
    // End of enumeration: clear the caller's iterator and entry (NULL here).
    result = net::ERR_FAILED;
    *work_item->iter_buffer() = NULL;
    *work_item->entry_buffer() = entry;
  }

  if (!work_item->user_callback().is_null())
    work_item->user_callback().Run(result);
}
1075 temp = GetEnumeratedEntry(iterator->nodes[i], | 1292 |
1076 static_cast<Rankings::List>(i)); | 1293 void BackendImplV3::CloseDoomedEntries() { |
1077 } | 1294 // Copy the current map to make sure no new entries are deleted. |
1078 | 1295 EntriesMap to_delete(entries_to_delete_); |
1079 entries[i].swap(&temp); // The entry was already addref'd. | 1296 for (EntriesMap::iterator it = to_delete.begin(); |
1080 } | 1297 it != to_delete.end(); ++it) { |
1081 } | 1298 it->second->Close(); |
1082 | 1299 } |
1083 int newest = -1; | 1300 } |
1084 int oldest = -1; | 1301 |
1085 Time access_times[kListsToSearch]; | 1302 void BackendImplV3::ReleaseRecentEntries() { |
1086 for (int i = 0; i < kListsToSearch; i++) { | 1303 for (EntriesSet::iterator it = recent_entries_.begin(); |
1087 if (entries[i].get()) { | 1304 it != recent_entries_.end(); ++it) { |
1088 access_times[i] = entries[i]->GetLastUsed(); | 1305 (*it)->Release(); |
1089 if (newest < 0) { | 1306 } |
1090 DCHECK_LT(oldest, 0); | 1307 recent_entries_.clear(); |
1091 newest = oldest = i; | 1308 } |
1092 continue; | 1309 |
1093 } | 1310 void BackendImplV3::UpdateDeletedEntries() { |
1094 if (access_times[i] > access_times[newest]) | 1311 for (size_t i = 0; i < deleted_entries_.size(); i++) { |
1095 newest = i; | 1312 CellInfo& cell_info = deleted_entries_[i]; |
1096 if (access_times[i] < access_times[oldest]) | 1313 index_.SetSate(cell_info.hash, cell_info.address, ENTRY_FREE); |
1097 oldest = i; | 1314 } |
1098 } | 1315 deleted_entries_.clear(); |
1099 } | 1316 } |
1100 | 1317 |
1101 if (newest < 0 || oldest < 0) | 1318 void BackendImplV3::AddStorageSize(int32 bytes) { |
1102 return NULL; | 1319 index_.header()->num_bytes += bytes; |
1103 | 1320 DCHECK_GE(index_.header()->num_bytes, 0); |
1104 EntryImpl* next_entry; | 1321 } |
1105 if (forward) { | 1322 |
1106 next_entry = entries[newest].get(); | 1323 void BackendImplV3::SubstractStorageSize(int32 bytes) { |
1107 iterator->list = static_cast<Rankings::List>(newest); | 1324 index_.header()->num_bytes -= bytes; |
1108 } else { | 1325 DCHECK_GE(index_.header()->num_bytes, 0); |
1109 next_entry = entries[oldest].get(); | 1326 } |
1110 iterator->list = static_cast<Rankings::List>(oldest); | 1327 |
1111 } | 1328 void BackendImplV3::IncreaseNumRefs() { |
1112 | |
1113 *iter = iterator.release(); | |
1114 next_entry->AddRef(); | |
1115 return next_entry; | |
1116 } | |
1117 | |
1118 void BackendImpl::AddStorageSize(int32 bytes) { | |
1119 data_->header.num_bytes += bytes; | |
1120 DCHECK_GE(data_->header.num_bytes, 0); | |
1121 } | |
1122 | |
1123 void BackendImpl::SubstractStorageSize(int32 bytes) { | |
1124 data_->header.num_bytes -= bytes; | |
1125 DCHECK_GE(data_->header.num_bytes, 0); | |
1126 } | |
1127 | |
1128 void BackendImpl::IncreaseNumRefs() { | |
1129 num_refs_++; | 1329 num_refs_++; |
1130 if (max_refs_ < num_refs_) | 1330 if (max_refs_ < num_refs_) |
1131 max_refs_ = num_refs_; | 1331 max_refs_ = num_refs_; |
1132 } | 1332 } |
1133 | 1333 |
1134 void BackendImpl::DecreaseNumRefs() { | 1334 void BackendImplV3::DecreaseNumRefs() { |
1135 DCHECK(num_refs_); | 1335 DCHECK(num_refs_); |
1136 num_refs_--; | 1336 num_refs_--; |
1137 | 1337 } |
1138 if (!num_refs_ && disabled_) | 1338 |
1139 base::MessageLoop::current()->PostTask( | 1339 void BackendImplV3::IncreaseNumEntries() { |
1140 FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true)); | 1340 index_.header()->num_entries++; |
1141 } | 1341 DCHECK_GT(index_.header()->num_entries, 0); |
1142 | 1342 } |
1143 void BackendImpl::IncreaseNumEntries() { | 1343 |
1144 data_->header.num_entries++; | 1344 void BackendImplV3::DecreaseNumEntries() { |
1145 DCHECK_GT(data_->header.num_entries, 0); | 1345 index_.header()->num_entries--; |
1146 } | 1346 if (index_.header()->num_entries < 0) { |
1147 | |
1148 void BackendImpl::DecreaseNumEntries() { | |
1149 data_->header.num_entries--; | |
1150 if (data_->header.num_entries < 0) { | |
1151 NOTREACHED(); | 1347 NOTREACHED(); |
1152 data_->header.num_entries = 0; | 1348 index_.header()->num_entries = 0; |
1153 } | 1349 } |
1154 } | 1350 } |
1155 | 1351 |
// Sends |work_item| to the worker on the cache thread. The item carries a
// closure back to OnWorkDone on this thread; the extra reference taken here
// is balanced there. No-op after the worker has been dropped (shutdown).
void BackendImplV3::PostWorkItem(WorkItem* work_item) {
  if (!worker_)
    return;
  Trace("Post task 0x%p %d flags 0x%x", work_item, work_item->type(),
        work_item->flags());

  // Long story short: we expect to see the work item back on this thread.
  // If the task is not executed we'll leak work_item, but that should only
  // happen at shutdown.
  work_item->AddRef();
  work_item->set_closure(base::Bind(&BackendImplV3::OnWorkDone,
                                    ptr_factory_.GetWeakPtr()));
  cache_thread_->PostTask(
      FROM_HERE,
      base::Bind(&BackendImplV3::Worker::OnDoWork, worker_, work_item));
}
| 1368 |
// Runs on this thread when the cache thread finishes a work item; dispatches
// to the per-type completion handler.
void BackendImplV3::OnWorkDone(WorkItem* work_item) {
  Trace("Task done 0x%p %d flags 0x%x", work_item, work_item->type(),
        work_item->flags());
  // Balance the reference from PostWorkItem.
  scoped_refptr<WorkItem> my_work_item;
  my_work_item.swap(&work_item);

  if (!worker_) {
    // This may be called after CleanupForTest was called.
    if (!my_work_item->user_callback().is_null())
      my_work_item->user_callback().Run(my_work_item->result());
    return;
  }

  switch (my_work_item->type()) {
    case WorkItem::WORK_INIT: return OnInitComplete(my_work_item);
    case WorkItem::WORK_RESTART: return OnInitComplete(my_work_item);
    case WorkItem::WORK_GROW_INDEX: return OnGrowIndexComplete(my_work_item);
    case WorkItem::WORK_GROW_FILES: return OnGrowFilesComplete(my_work_item);
    case WorkItem::WORK_OPEN_ENTRY: return OnOpenEntryComplete(my_work_item);
    default: return OnOperationComplete(my_work_item);
  }
}
| 1392 |
| 1393 void BackendImplV3::OnInitComplete(WorkItem* work_item) { |
| 1394 int rv = work_item->result(); |
| 1395 if (rv != ERR_NO_ERROR && rv != ERR_CACHE_CREATED && |
| 1396 rv != ERR_PREVIOUS_CRASH) { |
| 1397 ReportError(rv); |
| 1398 return work_item->user_callback().Run(net::ERR_FAILED); |
| 1399 } |
| 1400 |
1157 #if defined(NET_BUILD_STRESS_CACHE) | 1401 #if defined(NET_BUILD_STRESS_CACHE) |
1158 // Start evictions right away. | 1402 // Start evictions right away. |
1159 up_ticks_ = kTrimDelay * 2; | 1403 up_ticks_ = kTrimDelay * 2; |
1160 #endif | 1404 #endif |
1161 DCHECK(!init_); | 1405 DCHECK(!init_); |
1162 if (init_) | 1406 |
1163 return net::ERR_FAILED; | 1407 num_refs_ = max_refs_ = 0; |
1164 | |
1165 bool create_files = false; | |
1166 if (!InitBackingStore(&create_files)) { | |
1167 ReportError(ERR_STORAGE_ERROR); | |
1168 return net::ERR_FAILED; | |
1169 } | |
1170 | |
1171 num_refs_ = num_pending_io_ = max_refs_ = 0; | |
1172 entry_count_ = byte_count_ = 0; | 1408 entry_count_ = byte_count_ = 0; |
1173 | 1409 |
1174 if (!restarted_) { | 1410 if (!restarted_) { |
1175 buffer_bytes_ = 0; | 1411 buffer_bytes_ = 0; |
1176 trace_object_ = TraceObject::GetTraceObject(); | 1412 trace_object_ = TraceObject::GetTraceObject(); |
1177 // Create a recurrent timer of 30 secs. | 1413 // Create a recurrent timer of 30 secs (90 minutes for tests). |
1178 int timer_delay = unit_test_ ? 1000 : 30000; | 1414 int timer_delay = user_flags_ & BASIC_UNIT_TEST ? 90 * 60 * 1000 : |
1179 timer_.reset(new base::RepeatingTimer<BackendImpl>()); | 1415 kTimerSeconds * 1000; |
| 1416 timer_.reset(new base::RepeatingTimer<BackendImplV3>()); |
1180 timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this, | 1417 timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this, |
1181 &BackendImpl::OnStatsTimer); | 1418 &BackendImplV3::OnTimerTick); |
1182 } | 1419 } |
1183 | 1420 Trace("Init"); |
1184 init_ = true; | 1421 init_ = true; |
1185 Trace("Init"); | 1422 |
1186 | 1423 scoped_ptr<InitResult> result = work_item->init_result(); |
1187 if (data_->header.experiment != NO_EXPERIMENT && | 1424 index_.Init(&result.get()->index_data); |
| 1425 |
| 1426 if (index_.header()->experiment != NO_EXPERIMENT && |
1188 cache_type_ != net::DISK_CACHE) { | 1427 cache_type_ != net::DISK_CACHE) { |
1189 // No experiment for other caches. | 1428 // No experiment for other caches. |
1190 return net::ERR_FAILED; | 1429 ReportError(ERR_INIT_FAILED); |
1191 } | 1430 return work_item->user_callback().Run(net::ERR_FAILED); |
1192 | 1431 } |
1193 if (!(user_flags_ & kNoRandom)) { | 1432 |
| 1433 if (!(user_flags_ & BASIC_UNIT_TEST)) { |
1194 // The unit test controls directly what to test. | 1434 // The unit test controls directly what to test. |
1195 new_eviction_ = (cache_type_ == net::DISK_CACHE); | 1435 lru_eviction_ = (cache_type_ != net::DISK_CACHE); |
1196 } | 1436 } |
1197 | 1437 |
1198 if (!CheckIndex()) { | 1438 if (!CheckIndex()) { |
1199 ReportError(ERR_INIT_FAILED); | 1439 ReportError(ERR_INIT_FAILED); |
1200 return net::ERR_FAILED; | 1440 return work_item->user_callback().Run(net::ERR_FAILED); |
1201 } | 1441 } |
1202 | 1442 AdjustMaxCacheSize(); |
1203 if (!restarted_ && (create_files || !data_->header.num_entries)) | 1443 |
1204 ReportError(ERR_CACHE_CREATED); | 1444 block_files_.Init(result->block_bitmaps); |
1205 | |
1206 if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE && | |
1207 !InitExperiment(&data_->header, create_files)) { | |
1208 return net::ERR_FAILED; | |
1209 } | |
1210 | |
1211 // We don't care if the value overflows. The only thing we care about is that | |
1212 // the id cannot be zero, because that value is used as "not dirty". | |
1213 // Increasing the value once per second gives us many years before we start | |
1214 // having collisions. | |
1215 data_->header.this_id++; | |
1216 if (!data_->header.this_id) | |
1217 data_->header.this_id++; | |
1218 | |
1219 bool previous_crash = (data_->header.crash != 0); | |
1220 data_->header.crash = 1; | |
1221 | |
1222 if (!block_files_.Init(create_files)) | |
1223 return net::ERR_FAILED; | |
1224 | 1445 |
1225 // We want to minimize the changes to cache for an AppCache. | 1446 // We want to minimize the changes to cache for an AppCache. |
1226 if (cache_type() == net::APP_CACHE) { | 1447 if (cache_type() == net::APP_CACHE) { |
1227 DCHECK(!new_eviction_); | 1448 DCHECK(lru_eviction_); |
1228 read_only_ = true; | 1449 read_only_ = true; |
1229 } else if (cache_type() == net::SHADER_CACHE) { | 1450 } else if (cache_type() == net::SHADER_CACHE) { |
1230 DCHECK(!new_eviction_); | 1451 DCHECK(lru_eviction_); |
1231 } | 1452 } |
1232 | 1453 |
1233 eviction_.Init(this); | 1454 eviction_.Init(this); |
1234 | 1455 |
1235 // stats_ and rankings_ may end up calling back to us so we better be enabled. | 1456 int64 errors, full_dooms, partial_dooms, last_report; |
| 1457 errors = full_dooms = partial_dooms = last_report = 0; |
| 1458 if (work_item->type() == WorkItem::WORK_RESTART) { |
| 1459 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); |
| 1460 int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE); |
| 1461 int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT); |
| 1462 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); |
| 1463 } |
| 1464 |
| 1465 if (!InitStats(result->stats_data.get())) { |
| 1466 ReportError(ERR_INIT_FAILED); |
| 1467 return work_item->user_callback().Run(net::ERR_FAILED); |
| 1468 } |
| 1469 |
1236 disabled_ = false; | 1470 disabled_ = false; |
1237 if (!InitStats()) | |
1238 return net::ERR_FAILED; | |
1239 | |
1240 disabled_ = !rankings_.Init(this, new_eviction_); | |
1241 | 1471 |
1242 #if defined(STRESS_CACHE_EXTENDED_VALIDATION) | 1472 #if defined(STRESS_CACHE_EXTENDED_VALIDATION) |
1243 trace_object_->EnableTracing(false); | 1473 trace_object_->EnableTracing(false); |
1244 int sc = SelfCheck(); | 1474 int sc = SelfCheck(); |
1245 if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH) | 1475 if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH) |
1246 NOTREACHED(); | 1476 NOTREACHED(); |
1247 trace_object_->EnableTracing(true); | 1477 trace_object_->EnableTracing(true); |
1248 #endif | 1478 #endif |
1249 | 1479 |
1250 if (previous_crash) { | 1480 if (work_item->type() == WorkItem::WORK_RESTART) { |
1251 ReportError(ERR_PREVIOUS_CRASH); | 1481 stats_.SetCounter(Stats::FATAL_ERROR, errors); |
1252 } else if (!restarted_) { | 1482 stats_.SetCounter(Stats::DOOM_CACHE, full_dooms); |
1253 ReportError(ERR_NO_ERROR); | 1483 stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms); |
1254 } | 1484 stats_.SetCounter(Stats::LAST_REPORT, last_report); |
1255 | 1485 } |
1256 FlushIndex(); | 1486 |
1257 | 1487 ReportError(rv); |
1258 return disabled_ ? net::ERR_FAILED : net::OK; | 1488 return work_item->user_callback().Run(net::OK); |
1259 } | 1489 } |
1260 | 1490 |
// Completion handler for WORK_GROW_INDEX. The item makes two round trips:
// the first installs the freshly grown index data and bounces the item back
// to the cache thread flagged WORK_COMPLETE; the second (or any failure)
// simply clears the growing flag.
void BackendImplV3::OnGrowIndexComplete(WorkItem* work_item) {
  if (work_item->result() != ERR_NO_ERROR || disabled_ ||
      (work_item->flags() & WorkItem::WORK_COMPLETE)) {
    growing_index_ = false;
    return;
  }

  scoped_ptr<InitResult> result = work_item->init_result();
  index_.Init(&result.get()->index_data);
  work_item->set_flags(WorkItem::WORK_COMPLETE);
  PostWorkItem(work_item);
}
1273 entry_count_++; | 1503 |
// Completion handler for WORK_GROW_FILES; mirrors OnGrowIndexComplete but
// installs the new block-file bitmaps instead of index data.
void BackendImplV3::OnGrowFilesComplete(WorkItem* work_item) {
  if (work_item->result() != ERR_NO_ERROR || disabled_ ||
      (work_item->flags() & WorkItem::WORK_COMPLETE)) {
    growing_files_ = false;
    return;
  }

  scoped_ptr<InitResult> result = work_item->init_result();
  block_files_.Init(result->block_bitmaps);
  work_item->set_flags(WorkItem::WORK_COMPLETE);
  PostWorkItem(work_item);
}
1286 | 1516 |
// Default completion handler: dooms the owning entry when the disk operation
// failed, then forwards the result to the user callback if any.
void BackendImplV3::OnOperationComplete(WorkItem* work_item) {
  if (work_item->result() < 0 && work_item->owner_entry()) {
    // Make sure that there's a call to Close() after Doom().
    work_item->owner_entry()->AddRef();
    work_item->owner_entry()->Doom();
    work_item->owner_entry()->Close();
  }

  if (!work_item->user_callback().is_null())
    work_item->user_callback().Run(work_item->result());
}
1298 DCHECK(!error); | 1528 |
1299 if (parent_entry) { | 1529 |
// Completion handler for WORK_OPEN_ENTRY. Dispatches to the specialized
// handlers for resurrect/evict/iteration flows; otherwise materializes the
// entry, performs the requested action (doom, rank update, or plain open),
// and falls back to the next candidate cell when this one fails.
void BackendImplV3::OnOpenEntryComplete(WorkItem* work_item) {
  Trace("Open complete");
  if (work_item->flags() & WorkItem::WORK_FOR_RESURRECT)
    return OnOpenForResurrectComplete(work_item);

  if (work_item->flags() & WorkItem::WORK_FOR_EVICT)
    return OnEvictEntryComplete(work_item);

  if (work_item->flags() & WorkItem::WORK_FOR_ITERATION)
    return OnOpenNextComplete(work_item);

  if (work_item->result() == ERR_NO_ERROR) {
    EntryImplV3* entry;
    int error = NewEntry(work_item, &entry);
    if (!error) {
      if (work_item->flags() & WorkItem::WORK_FOR_DOOM) {
        entry->Doom();
        entry->Close();
      } else {
        eviction_.OnOpenEntry(entry);
        entry_count_++;
        if (work_item->flags() & WorkItem::WORK_FOR_UPDATE) {
          // Internal rank refresh: no result is handed back to a user.
          UpdateRank(entry, true);
          return;
        }
        *work_item->entry_buffer() = entry;

        Trace("Open hash 0x%x end: 0x%x", entry->GetHash(),
              entry->GetAddress().value());
        stats_.OnEvent(Stats::OPEN_HIT);
        SIMPLE_STATS_COUNTER("disk_cache.hit");
      }

      work_item->user_callback().Run(net::OK);
      return;
    }
  }

  // NOTE(review): |cells.size() - 1| is unsigned arithmetic — confirm |cells|
  // cannot be empty here, and that the "- 1" off-by-one is intended.
  if (work_item->entries()->current >= work_item->entries()->cells.size() - 1) {
    // Not found.
    work_item->user_callback().Run(net::ERR_FAILED);
    return;
  }

  // TODO: post a task to delete the stale cell that just failed.

  // Open the next entry on the list.
  work_item->entries()->current++;
  if (work_item->entries()->current < work_item->entries()->cells.size())
    PostWorkItem(work_item);
}
1321 int num_blocks = EntryImpl::NumBlocksForEntry(key.size()); | 1581 |
1322 if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) { | 1582 void BackendImplV3::OnOpenForResurrectComplete(WorkItem* work_item) { |
| 1583 if (work_item->result() == ERR_NO_ERROR) { |
| 1584 EntryImplV3* deleted_entry; |
| 1585 int error = NewEntry(work_item, &deleted_entry); |
| 1586 if (!error) { |
| 1587 scoped_ptr<ShortEntryRecord> entry_record = |
| 1588 deleted_entry->GetShortEntryRecord(); |
| 1589 CHECK(entry_record); |
| 1590 if (!entry_record) { |
| 1591 // This is an active entry. |
| 1592 deleted_entry->Close(); |
| 1593 stats_.OnEvent(Stats::CREATE_MISS); |
| 1594 Trace("create entry miss "); |
| 1595 work_item->user_callback().Run(net::ERR_FAILED);//doesn't make any sense |
| 1596 return; |
| 1597 } |
| 1598 |
| 1599 // We are attempting to create an entry and found out that the entry was |
| 1600 // previously deleted. |
| 1601 |
| 1602 stats_.OnEvent(Stats::RESURRECT_HIT); |
| 1603 Trace("Resurrect entry hit "); |
| 1604 deleted_entry->Doom(); |
| 1605 deleted_entry->Close(); |
| 1606 |
| 1607 int rv = |
| 1608 OnCreateEntryComplete(work_item->key(), deleted_entry->GetHash(), |
| 1609 entry_record.get(), work_item->entry_buffer(), |
| 1610 work_item->user_callback()); |
| 1611 DCHECK_EQ(rv, net::OK); |
| 1612 return; |
| 1613 } |
| 1614 } |
| 1615 |
| 1616 if (work_item->entries()->current >= work_item->entries()->cells.size()) { |
| 1617 // Not found. |
| 1618 work_item->user_callback().Run(net::ERR_FAILED); |
| 1619 return; |
| 1620 } |
| 1621 |
| 1622 //+post a task to delete the cell |
| 1623 |
| 1624 // Open the next entry on the list. |
| 1625 work_item->entries()->current++; |
| 1626 if (work_item->entries()->current < work_item->entries()->cells.size()) |
| 1627 PostWorkItem(work_item); |
| 1628 } |
| 1629 |
// Completion handler for the open issued while evicting an entry.
// Presumably cells[0] is the cell being evicted and cells[1] its copy —
// TODO(review): confirm against the code that posts this work item.
void BackendImplV3::OnEvictEntryComplete(WorkItem* work_item) {
  // If the entry could not be opened there is nothing to doom; just tell
  // the eviction logic that this cycle is finished.
  if (work_item->result() != ERR_NO_ERROR)
    return eviction_.OnEvictEntryComplete();

  EntryCell old_cell =
      index_.FindEntryCell(work_item->entries()->cells[0].hash(),
                           work_item->entries()->cells[0].GetAddress());
  DCHECK(old_cell.IsValid());

  if (!(work_item->flags() & WorkItem::WORK_NO_COPY)) {
    // |new_cell| only feeds the DCHECK below (verifying that the copied
    // cell is reachable through the index); release builds don't use it.
    EntryCell new_cell =
        index_.FindEntryCell(work_item->entries()->cells[1].hash(),
                             work_item->entries()->cells[1].GetAddress());
    DCHECK(new_cell.IsValid());
  }

  EntryImplV3* entry;
  int error = NewEntry(work_item, &entry);
  if (!error) {
    entry->Doom();
    entry->Close();
  }

  //+delete old_cell after a timer (so add to deleted entries).

  eviction_.OnEvictEntryComplete();
}
| 1657 |
| 1658 void BackendImplV3::OnOpenNextComplete(WorkItem* work_item) { |
| 1659 Trace("OpenNext complete, work item 0x%p", work_item); |
| 1660 if (work_item->result() != ERR_NO_ERROR) { |
| 1661 OpenNext(work_item); // Ignore result. |
| 1662 return; |
| 1663 } |
| 1664 |
| 1665 EntryImplV3* entry; |
| 1666 int error = NewEntry(work_item, &entry); |
| 1667 if (!error) { |
| 1668 if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) |
| 1669 Doom(entry, work_item); |
| 1670 else |
| 1671 return UpdateIterator(entry, work_item); |
| 1672 } |
| 1673 |
| 1674 // Grab another entry. |
| 1675 OpenNext(work_item); // Ignore result. |
| 1676 } |
| 1677 |
// Final stage of entry creation: allocates the on-disk block, registers a
// cell for it in the index and builds the in-memory EntryImplV3.
// |short_record| is non-null when this is the resurrection of a previously
// deleted entry, in which case |callback| is also run here.
// Returns a net error code; on success *|entry| holds a referenced entry.
int BackendImplV3::OnCreateEntryComplete(const std::string& key, uint32 hash,
                                         ShortEntryRecord* short_record,
                                         Entry** entry,
                                         const CompletionCallback& callback) {
  // Create a new object in memory and return it to the caller.
  Addr entry_address;
  Trace("Create complete hash 0x%x", hash);
  if (!block_files_.CreateBlock(BLOCK_ENTRIES, 1, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return net::ERR_FAILED;
  }

  EntryCell cell = index_.CreateEntryCell(hash, entry_address);
  if (!cell.IsValid()) {
    // NOTE(review): unlike the failure above, this path records no
    // Stats::CREATE_ERROR and logs nothing — confirm that is intentional.
    block_files_.DeleteBlock(entry_address);
    return net::ERR_FAILED;
  }

  scoped_refptr<EntryImplV3> cache_entry(
      new EntryImplV3(this, cell.GetAddress(), false));
  IncreaseNumRefs();

  cache_entry->CreateEntry(key, hash, short_record);
  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[cell.GetAddress().value()] = cache_entry;

  IncreaseNumEntries();
  entry_count_++;

  // Resurrected entries rejoin the eviction lists differently from brand
  // new ones.
  if (short_record)
    eviction_.OnResurrectEntry(cache_entry);
  else
    eviction_.OnCreateEntry(cache_entry);

  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  // Hand one reference to the caller.
  cache_entry->AddRef();
  *entry = cache_entry.get();

  // For resurrections the user callback has not been run yet; do it now.
  if (short_record)
    callback.Run(net::OK);

  return net::OK;
}
| 1726 |
| 1727 void BackendImplV3::LogStats() { |
1379 StatsItems stats; | 1728 StatsItems stats; |
1380 GetStats(&stats); | 1729 GetStats(&stats); |
1381 | 1730 |
1382 for (size_t index = 0; index < stats.size(); index++) | 1731 for (size_t index = 0; index < stats.size(); index++) |
1383 VLOG(1) << stats[index].first << ": " << stats[index].second; | 1732 VLOG(1) << stats[index].first << ": " << stats[index].second; |
1384 } | 1733 } |
1385 | 1734 |
// Sends periodic cache statistics to UMA. Several counters are reset here,
// so each report covers the interval since the previous one.
void BackendImplV3::ReportStats() {
  IndexHeaderV3* header = index_.header();
  CACHE_UMA(COUNTS, "Entries", 0, header->num_entries);

  int current_size = header->num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);

  CACHE_UMA(COUNTS_10000, "Size", 0, current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize", 0, max_size);
  if (!max_size)
    max_size++;  // Avoid dividing by zero for sub-megabyte caches.
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  // TIMER counts timer ticks; /120 presumably converts 30-second ticks to
  // hours — TODO(review): confirm the tick period.
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!(header->flags & CACHE_EVICTED)) {
    // A cache that has never evicted anything only reports its age.
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up to date client that will report FirstEviction() data. After
  // that event, start reporting this:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !header->num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = header->num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, header->num_entries);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / header->num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  // Ratios across the use-count lists only exist for the non-LRU policy.
  if (!lru_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              header->num_no_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              header->num_low_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              header->num_high_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              header->num_evicted_entries * 100 / header->num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}
1491 | 1817 |
1492 void BackendImpl::ReportError(int error) { | 1818 void BackendImplV3::ReportError(int error) { |
1493 STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH || | 1819 STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH || |
1494 error == ERR_CACHE_CREATED); | 1820 error == ERR_CACHE_CREATED); |
1495 | 1821 |
1496 // We transmit positive numbers, instead of direct error codes. | 1822 // We transmit positive numbers, instead of direct error codes. |
1497 DCHECK_LE(error, 0); | 1823 DCHECK_LE(error, 0); |
1498 CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1); | 1824 CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1); |
1499 } | 1825 } |
1500 | 1826 |
// Init-time index sanity check. Most of the V2 validation is disabled
// (commented out) pending the V3 rewrite; for now this only picks the
// eviction flavor out of the index header and reports success.
bool BackendImplV3::CheckIndex() {
  if (index_.header()->flags & CACHE_EVICTION_2)
    lru_eviction_ = false;

  /*
  if (!index_.header()->table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(index_.header()->table_len) ||
      index_.header()->table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(index_.header()->table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (index_.header()->num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       index_.header()->num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (index_.header()->num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = index_.header()->table_len - 1;

  // Load the table into memory with a single read.
  scoped_array<char> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
  */

  // TODO(review): make sure things look fine, maybe scan the whole thing
  // if not.
  return true;
}
1563 | 1870 |
// Full-cache consistency walk. Disabled pending the V3 rewrite: currently
// always reports zero dirty entries. The commented-out code is the V2
// implementation, kept for reference; it walked every hash chain, counted
// dirty vs. valid entries and compared the total against the header.
int BackendImplV3::CheckAllEntries() {
  /*
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      EntryImplV3* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImplV3> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != index_.header()->num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
                  " " << index_.header()->num_entries;
    DCHECK_LT(num_entries, index_.header()->num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
  */
  return 0;
}
1606 | 1916 |
// Per-entry consistency check. Disabled pending the V3 rewrite: every entry
// is currently reported as valid. The commented-out code is the V2 check,
// kept for reference (it validated the entry's block addresses and the
// rankings hash).
bool BackendImplV3::CheckEntry(EntryImplV3* cache_entry) {
  /*
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
  */
  return true;
}
1621 | 1934 |
1622 int BackendImpl::MaxBuffersSize() { | 1935 int BackendImplV3::MaxBuffersSize() { |
1623 static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory(); | 1936 static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory(); |
1624 static bool done = false; | 1937 static bool done = false; |
1625 | 1938 |
1626 if (!done) { | 1939 if (!done) { |
1627 const int kMaxBuffersSize = 30 * 1024 * 1024; | 1940 const int kMaxBuffersSize = 30 * 1024 * 1024; |
1628 | 1941 |
1629 // We want to use up to 2% of the computer's memory. | 1942 // We want to use up to 2% of the computer's memory. |
1630 total_memory = total_memory * 2 / 100; | 1943 total_memory = total_memory * 2 / 100; |
1631 if (total_memory > kMaxBuffersSize || total_memory <= 0) | 1944 if (total_memory > kMaxBuffersSize || total_memory <= 0) |
1632 total_memory = kMaxBuffersSize; | 1945 total_memory = kMaxBuffersSize; |
1633 | 1946 |
1634 done = true; | 1947 done = true; |
1635 } | 1948 } |
1636 | 1949 |
1637 return static_cast<int>(total_memory); | 1950 return static_cast<int>(total_memory); |
1638 } | 1951 } |
1639 | 1952 |
1640 } // namespace disk_cache | 1953 } // namespace disk_cache |
OLD | NEW |