// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/backend_impl.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/blockfile/errors.h"
#include "net/disk_cache/blockfile/experiments.h"
#include "net/disk_cache/blockfile/file.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/webfonts_histogram.h"
#include "net/disk_cache/cache_util.h"

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ this

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const char kIndexName[] = "index";

// Seems like ~240 MB corresponds to fewer than 50k entries for 99% of users.
// Note that the actual target is to keep the index table load factor under 55%
// for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}
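// Illustrative sizing, derived from the constants above: up to 240 MB of
// storage gets the base 64k-entry table, 480 MB gets 128k, 960 MB gets 256k,
// 1.92 GB gets 512k, and anything larger caps at a 1M-entry table.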

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}
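// For example, assuming the 4-byte CacheAddr from disk_format.h, the base
// 64k-entry table yields a 256 KB table plus the index header.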

// ------------------------------------------------------------------------

// Sets group for the current experiment. Returns false if the files should be
// discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
      "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    }
    return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}

// A callback to perform final cleanup on the background thread.
void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
  backend->CleanupCache();
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

BackendImpl::BackendImpl(
    const base::FilePath& path,
    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(0),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::BackendImpl(
    const base::FilePath& path,
    uint32 mask,
    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(mask),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(kMask),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}

int BackendImpl::Init(const CompletionCallback& callback) {
  background_queue_.Init(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  bool should_create_timer = false;
  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    should_create_timer = true;
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  if (!disabled_ && should_create_timer) {
    // Create a recurrent timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  return disabled_ ? net::ERR_FAILED : net::OK;
}

void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = NULL;
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

// ------------------------------------------------------------------------

int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = OpenEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = CreateEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncDoomEntry(const std::string& key) {
  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* entry = OpenEntryImpl(key);
  if (!entry)
    return net::ERR_FAILED;

  entry->DoomImpl();
  entry->Release();
  return net::OK;
}

int BackendImpl::SyncDoomAllEntries() {
  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(false);
    return disabled_ ? net::ERR_FAILED : net::OK;
  } else {
    if (disabled_)
      return net::ERR_FAILED;

    eviction_.TrimCache(true);
    return net::OK;
  }
}

int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (end_time.is_null())
    return SyncDoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* node;
  scoped_ptr<Rankings::Iterator> iterator(new Rankings::Iterator());
  EntryImpl* next = OpenNextEntryImpl(iterator.get());
  if (!next)
    return net::OK;

  while (next) {
    node = next;
    next = OpenNextEntryImpl(iterator.get());

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->DoomImpl();
    } else if (node->GetLastUsed() < initial_time) {
      if (next)
        next->Release();
      next = NULL;
      SyncEndEnumeration(iterator.Pass());
    }

    node->Release();
  }

  return net::OK;
}

// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (disabled_)
    return net::ERR_FAILED;

  stats_.OnEvent(Stats::DOOM_RECENT);
  for (;;) {
    scoped_ptr<Rankings::Iterator> iterator(new Rankings::Iterator());
    EntryImpl* entry = OpenNextEntryImpl(iterator.get());
    if (!entry)
      return net::OK;

    if (initial_time > entry->GetLastUsed()) {
      entry->Release();
      SyncEndEnumeration(iterator.Pass());
      return net::OK;
    }

    entry->DoomImpl();
    entry->Release();
    SyncEndEnumeration(iterator.Pass());  // The doom invalidated the iterator.
  }
}

int BackendImpl::SyncOpenNextEntry(Rankings::Iterator* iterator,
                                   Entry** next_entry) {
  *next_entry = OpenNextEntryImpl(iterator);
  return (*next_entry) ? net::OK : net::ERR_FAILED;
}

void BackendImpl::SyncEndEnumeration(scoped_ptr<Rankings::Iterator> iterator) {
  iterator->Reset();
}

void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
  if (disabled_)
    return;

  uint32 hash = base::Hash(key);
  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry) {
    if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
      UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
    }
    cache_entry->Release();
  }
}

EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
  if (disabled_)
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Open hash 0x%x", hash);

  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
    cache_entry->Release();
    cache_entry = NULL;
    web_fonts_histogram::RecordEvictedEntry(key);
  } else if (!cache_entry) {
    web_fonts_histogram::RecordCacheMiss(key);
  }

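  // Note: Stats::TIMER advances once per 30-second tick (see OnStatsTimer),
  // so dividing by 120 converts timer ticks to hours.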
  int current_size = data_->header.num_bytes / (1024 * 1024);
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  int64 use_hours = total_hours - no_use_hours;

  if (!cache_entry) {
    CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
    CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
    CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0,
              static_cast<base::HistogramBase::Sample>(total_hours));
    CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0,
              static_cast<base::HistogramBase::Sample>(use_hours));
    stats_.OnEvent(Stats::OPEN_MISS);
    return NULL;
  }

  eviction_.OnOpenEntry(cache_entry);
  entry_count_++;

  Trace("Open hash 0x%x end: 0x%x", hash,
        cache_entry->entry()->address().value());
  CACHE_UMA(AGE_MS, "OpenTime", 0, start);
  CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
  CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0,
            static_cast<base::HistogramBase::Sample>(total_hours));
  CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0,
            static_cast<base::HistogramBase::Sample>(use_hours));
  stats_.OnEvent(Stats::OPEN_HIT);
  web_fonts_histogram::RecordCacheHit(cache_entry);
  return cache_entry;
}

EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
  if (disabled_ || key.empty())
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Create hash 0x%x", hash);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or
    // just a hash conflict.
    bool error;
    EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    if (old_entry)
      return ResurrectEntry(old_entry);

    EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    DCHECK(!error);
    if (parent_entry) {
      parent.swap(&parent_entry);
    } else if (data_->table[hash & mask_]) {
      // We should have corrected the problem.
      NOTREACHED();
      return NULL;
    }
  }

  // The general flow is to allocate disk space and initialize the entry data,
  // followed by saving that to disk, then linking the entry through the index
  // and finally through the lists. If there is a crash in this process, we may
  // end up with:
  // a. Used, unreferenced empty blocks on disk (basically just garbage).
  // b. Used, unreferenced but meaningful data on disk (more garbage).
  // c. A fully formed entry, reachable only through the index.
  // d. A fully formed entry, also reachable through the lists, but still dirty.
  //
  // Anything after (b) can be automatically cleaned up. We may consider saving
  // the current operation (as we do while manipulating the lists) so that we
  // can detect and clean up (a) and (b).

  int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, entry_address, false));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry.get();

  // Save the entry.
  cache_entry->entry()->Store();
  cache_entry->rankings()->Store();
  IncreaseNumEntries();
  entry_count_++;

  // Link this entry through the index.
  if (parent.get()) {
    parent->SetNextAddress(entry_address);
  } else {
    data_->table[hash & mask_] = entry_address.value();
  }

  // Link this entry through the lists.
  eviction_.OnCreateEntry(cache_entry.get());

  CACHE_UMA(AGE_MS, "CreateTime", 0, start);
  stats_.OnEvent(Stats::CREATE_HIT);
  Trace("create entry hit ");
  FlushIndex();
  cache_entry->AddRef();
  return cache_entry.get();
}

EntryImpl* BackendImpl::OpenNextEntryImpl(Rankings::Iterator* iterator) {
  if (disabled_)
    return NULL;

  const int kListsToSearch = 3;
  scoped_refptr<EntryImpl> entries[kListsToSearch];
  if (!iterator->my_rankings) {
    iterator->my_rankings = &rankings_;
    bool ret = false;

    // Get an entry from each list.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      ret |= OpenFollowingEntryFromList(static_cast<Rankings::List>(i),
                                        &iterator->nodes[i], &temp);
      entries[i].swap(&temp);  // The entry was already addref'd.
    }
    if (!ret) {
      iterator->Reset();
      return NULL;
    }
  } else {
    // Get the next entry from the last list, and the actual entries for the
    // elements on the other lists.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      if (iterator->list == i) {
        OpenFollowingEntryFromList(
            iterator->list, &iterator->nodes[i], &temp);
      } else {
        temp = GetEnumeratedEntry(iterator->nodes[i],
                                  static_cast<Rankings::List>(i));
      }

      entries[i].swap(&temp);  // The entry was already addref'd.
    }
  }

  int newest = -1;
  int oldest = -1;
  Time access_times[kListsToSearch];
  for (int i = 0; i < kListsToSearch; i++) {
    if (entries[i].get()) {
      access_times[i] = entries[i]->GetLastUsed();
      if (newest < 0) {
        DCHECK_LT(oldest, 0);
        newest = oldest = i;
        continue;
      }
      if (access_times[i] > access_times[newest])
        newest = i;
      if (access_times[i] < access_times[oldest])
        oldest = i;
    }
  }

  if (newest < 0 || oldest < 0) {
    iterator->Reset();
    return NULL;
  }

  EntryImpl* next_entry;
  next_entry = entries[newest].get();
  iterator->list = static_cast<Rankings::List>(newest);
  next_entry->AddRef();
  return next_entry;
}
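// Note: the enumeration above effectively merges the rankings lists by
// last-used time; each step returns the newest of the candidate heads and
// only advances the list that the returned entry came from.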

bool BackendImpl::SetMaxSize(int max_bytes) {
  static_assert(sizeof(max_bytes) == sizeof(max_size_),
                "unsupported int model");
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
  if (max_bytes >= kint32max - kint32max / 10)
    max_bytes = kint32max - kint32max / 10 - 1;

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}
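// Illustrative: kint32max - kint32max / 10 is about 1.93 GB, so the clamp
// above keeps max_size_ roughly 10% below the int32 limit.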

void BackendImpl::SetType(net::CacheType type) {
  DCHECK_NE(net::MEMORY_CACHE, type);
  cache_type_ = type;
}

base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

MappedFile* BackendImpl::File(Addr address) {
  if (disabled_)
    return NULL;
  return block_files_.GetFile(address);
}

base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
  return background_queue_.GetWeakPtr();
}

bool BackendImpl::CreateExternalFile(Addr* address) {
  int file_number = data_->header.last_file + 1;
  Addr file_address(0);
  bool success = false;
  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    if (!file_address.SetFileNumber(file_number)) {
      file_number = 1;
      continue;
    }
    base::FilePath name = GetFileName(file_address);
    int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
                base::File::FLAG_CREATE | base::File::FLAG_EXCLUSIVE_WRITE;
    base::File file(name, flags);
    if (!file.IsValid()) {
      base::File::Error error = file.error_details();
      if (error != base::File::FILE_ERROR_EXISTS) {
        LOG(ERROR) << "Unable to create file: " << error;
        return false;
      }
      continue;
    }

    success = true;
    break;
  }

  DCHECK(success);
  if (!success)
    return false;

  data_->header.last_file = file_number;
  address->set_value(file_address.value());
  return true;
}
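// Note: when SetFileNumber rejects a candidate (presumably because the
// f_XXXXXX number space is exhausted), the loop above wraps file_number back
// to 1, so every slot can be probed before giving up.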

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}

LruData* BackendImpl::GetLruData() {
  return &data_->header.lru;
}

void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
  if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
    return;
  eviction_.UpdateRank(entry, modified);
}

void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
  Addr address(rankings->Data()->contents);
  EntryImpl* cache_entry = NULL;
  if (NewEntry(address, &cache_entry)) {
    STRESS_NOTREACHED();
    return;
  }

  uint32 hash = cache_entry->GetHash();
  cache_entry->Release();

  // Anything on the table means that this entry is there.
  if (data_->table[hash & mask_])
    return;

  data_->table[hash & mask_] = address.value();
  FlushIndex();
}

void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->entry()->address();
  bool error;
  EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
  CacheAddr child(entry->GetNextAddress());

  Trace("Doom entry 0x%p", entry);

  if (!entry->doomed()) {
    // We may have doomed this entry from within MatchEntry.
    eviction_.OnDoomEntry(entry);
    entry->InternalDoom();
    if (!new_eviction_) {
      DecreaseNumEntries();
    }
    stats_.OnEvent(Stats::DOOM_ENTRY);
  }

  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry->Release();
  } else if (!error) {
    data_->table[hash & mask_] = child;
  }

  FlushIndex();
}

#if defined(NET_BUILD_STRESS_CACHE)

CacheAddr BackendImpl::GetNextAddr(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    EntryImpl* this_entry = it->second;
    return this_entry->GetNextAddress();
  }
  DCHECK(block_files_.IsValid(address));
  DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);

  CacheEntryBlock entry(File(address), address);
  CHECK(entry.Load());
  return entry.Data()->next;
}

void BackendImpl::NotLinked(EntryImpl* entry) {
  Addr entry_addr = entry->entry()->address();
  uint32 i = entry->GetHash() & mask_;
  Addr address(data_->table[i]);
  if (!address.is_initialized())
    return;

  for (;;) {
    DCHECK(entry_addr.value() != address.value());
    address.set_value(GetNextAddr(address));
    if (!address.is_initialized())
      break;
  }
}
#endif  // NET_BUILD_STRESS_CACHE

// An entry may be linked on the DELETED list for a while after being doomed.
// This function is called when we want to remove it.
void BackendImpl::RemoveEntry(EntryImpl* entry) {
#if defined(NET_BUILD_STRESS_CACHE)
  NotLinked(entry);
#endif
  if (!new_eviction_)
    return;

  DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);

  Trace("Remove entry 0x%p", entry);
  eviction_.OnDestroyEntry(entry);
  DecreaseNumEntries();
}

void BackendImpl::OnEntryDestroyBegin(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
}

void BackendImpl::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (data_->header.num_bytes > max_size_ && !read_only_ &&
      (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
    eviction_.TrimCache(false);
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  EntriesMap::const_iterator it =
      open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return NULL;
}

int32 BackendImpl::GetCurrentEntryId() const {
  return data_->header.this_id;
}

int BackendImpl::MaxFileSize() const {
  return cache_type() == net::PNACL_CACHE ? max_size_ : max_size_ / 8;
}

void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  FlushIndex();

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

void BackendImpl::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}

bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
  DCHECK_GT(new_size, current_size);
  if (user_flags_ & kNoBuffering)
    return false;

  int to_add = new_size - current_size;
  if (buffer_bytes_ + to_add > MaxBuffersSize())
    return false;

  buffer_bytes_ += to_add;
  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
  return true;
}

void BackendImpl::BufferDeleted(int size) {
  buffer_bytes_ -= size;
  DCHECK_GE(size, 0);
}

bool BackendImpl::IsLoaded() const {
  CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
  if (user_flags_ & kNoLoadProtection)
    return false;

  return (num_pending_io_ > 5 || user_load_);
}

std::string BackendImpl::HistogramName(const char* name, int experiment) const {
  if (!experiment)
    return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
                            name, experiment);
}

base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImpl::ShouldReportAgain() {
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    uma_report_++;
    return true;
  }
  return false;
}
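// Note: uma_report_ is effectively a tri-state: 0 means the decision has not
// been made yet, 1 means we decided not to report this session, and 2 means
// we decided to report.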

void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.

  Time create_time = Time::FromInternalValue(data_->header.create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
            static_cast<int>(data_->header.num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
            static_cast<int>((data_->header.num_bytes / 1024) / use_time));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
}

void BackendImpl::CriticalError(int error) {
  STRESS_NOTREACHED();
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  stats_.OnEvent(Stats::FATAL_ERROR);
  LogStats();
  ReportError(error);

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  if (!num_refs_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

void BackendImpl::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}

void BackendImpl::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}

void BackendImpl::OnStatsTimer() {
  if (disabled_)
    return;

  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
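  // In other words, an exponential moving average that moves 1/50 of the gap
  // toward num_refs_ on every tick (or by at least one).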
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);

  CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
  CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  if (!data_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  // Save stats to disk at 5 min intervals.
  if (time % 10 == 0)
    StoreStats();
}

void BackendImpl::IncrementIoCount() {
  num_pending_io_++;
}

void BackendImpl::DecrementIoCount() {
  num_pending_io_--;
}

void BackendImpl::SetUnitTestMode() {
  user_flags_ |= kUnitTestMode;
  unit_test_ = true;
}

void BackendImpl::SetUpgradeMode() {
  user_flags_ |= kUpgradeMode;
  read_only_ = true;
}

void BackendImpl::SetNewEviction() {
  user_flags_ |= kNewEviction;
  new_eviction_ = true;
}

void BackendImpl::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}

void BackendImpl::ClearRefCountForTest() {
  num_refs_ = 0;
}

int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
  background_queue_.FlushQueue(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::RunTaskForTest(const base::Closure& task,
                                const CompletionCallback& callback) {
  background_queue_.RunTask(task, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::TrimForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimCache(empty);
}

void BackendImpl::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}

base::RepeatingTimer<BackendImpl>* BackendImpl::GetTimerForTest() {
  return timer_.get();
}

int BackendImpl::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }

  return CheckAllEntries();
}

void BackendImpl::FlushIndex() {
  if (index_.get() && !disabled_)
    index_->Flush();
}

// ------------------------------------------------------------------------

net::CacheType BackendImpl::GetCacheType() const {
  return cache_type_;
}

int32 BackendImpl::GetEntryCount() const {
  if (!index_.get() || disabled_)
    return 0;
  // num_entries includes entries already evicted.
  int32 not_deleted = data_->header.num_entries -
                      data_->header.lru.sizes[Rankings::DELETED];

  if (not_deleted < 0) {
    NOTREACHED();
    not_deleted = 0;
  }

  return not_deleted;
}

int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.CreateEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntry(const std::string& key,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntry(key, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomAllEntries(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                    const base::Time end_time,
                                    const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesSince(const base::Time initial_time,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesSince(initial_time, callback);
  return net::ERR_IO_PENDING;
}

class BackendImpl::IteratorImpl : public Backend::Iterator {
 public:
  explicit IteratorImpl(base::WeakPtr<InFlightBackendIO> background_queue)
      : background_queue_(background_queue),
        iterator_(new Rankings::Iterator()) {
  }

  ~IteratorImpl() override {
    if (background_queue_)
      background_queue_->EndEnumeration(iterator_.Pass());
  }

  int OpenNextEntry(Entry** next_entry,
                    const net::CompletionCallback& callback) override {
    if (!background_queue_)
      return net::ERR_FAILED;
    background_queue_->OpenNextEntry(iterator_.get(), next_entry, callback);
    return net::ERR_IO_PENDING;
  }

 private:
  const base::WeakPtr<InFlightBackendIO> background_queue_;
  scoped_ptr<Rankings::Iterator> iterator_;
};

scoped_ptr<Backend::Iterator> BackendImpl::CreateIterator() {
  return scoped_ptr<Backend::Iterator>(new IteratorImpl(GetBackgroundQueue()));
}

void BackendImpl::GetStats(StatsItems* stats) {
  if (disabled_)
    return;

  std::pair<std::string, std::string> item;

  item.first = "Entries";
  item.second = base::StringPrintf("%d", data_->header.num_entries);
  stats->push_back(item);

  item.first = "Pending IO";
  item.second = base::StringPrintf("%d", num_pending_io_);
  stats->push_back(item);

  item.first = "Max size";
  item.second = base::StringPrintf("%d", max_size_);
  stats->push_back(item);

  item.first = "Current size";
  item.second = base::StringPrintf("%d", data_->header.num_bytes);
  stats->push_back(item);

  item.first = "Cache type";
  item.second = "Blockfile Cache";
  stats->push_back(item);

  stats_.GetItems(stats);
}

void BackendImpl::OnExternalCacheHit(const std::string& key) {
  background_queue_.OnExternalCacheHit(key);
}

// ------------------------------------------------------------------------

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  if (new_eviction_)
    header.version = 0x20001;
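  // (0x20001 encodes version 2.1: the major number in the high 16 bits and
  // the minor number in the low 16 bits.)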

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}

bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!base::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
              base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_EXCLUSIVE_WRITE;
  base::File base_file(index_name, flags);
  if (!base_file.IsValid())
    return false;

  bool ret = true;
  *file_created = base_file.created();

  scoped_refptr<disk_cache::File> file(new disk_cache::File(base_file.Pass()));
  if (*file_created)
    ret = CreateBackingStore(file.get());

  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = static_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  return true;
}

// The maximum cache size will be either set explicitly by the caller, or
// calculated by this code.
void BackendImpl::AdjustMaxCacheSize(int table_len) {
  if (max_size_)
    return;

  // If table_len is provided, the index file exists.
  DCHECK(!table_len || data_->header.magic);

  // The user is not setting the size, let's figure it out.
  int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
  if (available < 0) {
    max_size_ = kDefaultCacheSize;
    return;
  }

  if (table_len)
    available += data_->header.num_bytes;

  max_size_ = PreferredCacheSize(available);

  if (!table_len)
    return;

  // If we already have a table, adjust the size to it.
  int current_max_size = MaxStorageSizeForTable(table_len);
  if (max_size_ > current_max_size)
    max_size_ = current_max_size;
}

bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;

    data_->header.stats = address.value();
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}
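// Note: block-file payloads start kBlockHeaderSize bytes into the file, so a
// record lives at start_block() * BlockSize() + kBlockHeaderSize, which is
// the offset computation used above (and again in StoreStats below).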
1445 | |
1446 void BackendImpl::StoreStats() { | |
1447 int size = stats_.StorageSize(); | |
1448 scoped_ptr<char[]> data(new char[size]); | |
1449 Addr address; | |
1450 size = stats_.SerializeStats(data.get(), size, &address); | |
1451 DCHECK(size); | |
1452 if (!address.is_initialized()) | |
1453 return; | |
1454 | |
1455 MappedFile* file = File(address); | |
1456 if (!file) | |
1457 return; | |
1458 | |
1459 size_t offset = address.start_block() * address.BlockSize() + | |
1460 kBlockHeaderSize; | |
1461 file->Write(data.get(), size, offset); // ignore result. | |
1462 } | |
1463 | |
1464 void BackendImpl::RestartCache(bool failure) { | |
1465 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); | |
1466 int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE); | |
1467 int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT); | |
1468 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); | |
1469 | |
1470 PrepareForRestart(); | |
1471 if (failure) { | |
1472 DCHECK(!num_refs_); | |
1473 DCHECK(!open_entries_.size()); | |
1474 DelayedCacheCleanup(path_); | |
1475 } else { | |
1476 DeleteCache(path_, false); | |
1477 } | |
1478 | |
1479 // Don't call Init() if directed by the unit test: we are simulating a failure | |
1480 // trying to re-enable the cache. | |
1481 if (unit_test_) | |
1482 init_ = true; // Let the destructor do proper cleanup. | |
1483 else if (SyncInit() == net::OK) { | |
1484 stats_.SetCounter(Stats::FATAL_ERROR, errors); | |
1485 stats_.SetCounter(Stats::DOOM_CACHE, full_dooms); | |
1486 stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms); | |
1487 stats_.SetCounter(Stats::LAST_REPORT, last_report); | |
1488 } | |
1489 } | |
1490 | |
1491 void BackendImpl::PrepareForRestart() { | |
1492 // Reset the mask_ if it was not given by the user. | |
1493 if (!(user_flags_ & kMask)) | |
1494 mask_ = 0; | |
1495 | |
1496 if (!(user_flags_ & kNewEviction)) | |
1497 new_eviction_ = false; | |
1498 | |
1499 disabled_ = true; | |
1500 data_->header.crash = 0; | |
1501 index_->Flush(); | |
1502 index_ = NULL; | |
1503 data_ = NULL; | |
1504 block_files_.CloseFiles(); | |
1505 rankings_.Reset(); | |
1506 init_ = false; | |
1507 restarted_ = true; | |
1508 } | |
1509 | |
1510 int BackendImpl::NewEntry(Addr address, EntryImpl** entry) { | |
1511 EntriesMap::iterator it = open_entries_.find(address.value()); | |
1512 if (it != open_entries_.end()) { | |
1513 // Easy job. This entry is already in memory. | |
1514 EntryImpl* this_entry = it->second; | |
1515 this_entry->AddRef(); | |
1516 *entry = this_entry; | |
1517 return 0; | |
1518 } | |
1519 | |
1520 STRESS_DCHECK(block_files_.IsValid(address)); | |
1521 | |
1522 if (!address.SanityCheckForEntryV2()) { | |
1523 LOG(WARNING) << "Wrong entry address."; | |
1524 STRESS_NOTREACHED(); | |
1525 return ERR_INVALID_ADDRESS; | |
1526 } | |
1527 | |
1528 scoped_refptr<EntryImpl> cache_entry( | |
1529 new EntryImpl(this, address, read_only_)); | |
1530 IncreaseNumRefs(); | |
1531 *entry = NULL; | |
1532 | |
1533 TimeTicks start = TimeTicks::Now(); | |
1534 if (!cache_entry->entry()->Load()) | |
1535 return ERR_READ_FAILURE; | |
1536 | |
1537 if (IsLoaded()) { | |
1538 CACHE_UMA(AGE_MS, "LoadTime", 0, start); | |
1539 } | |
1540 | |
1541 if (!cache_entry->SanityCheck()) { | |
1542 LOG(WARNING) << "Messed up entry found."; | |
1543 STRESS_NOTREACHED(); | |
1544 return ERR_INVALID_ENTRY; | |
1545 } | |
1546 | |
1547 STRESS_DCHECK(block_files_.IsValid( | |
1548 Addr(cache_entry->entry()->Data()->rankings_node))); | |
1549 | |
1550 if (!cache_entry->LoadNodeAddress()) | |
1551 return ERR_READ_FAILURE; | |
1552 | |
1553 if (!rankings_.SanityCheck(cache_entry->rankings(), false)) { | |
1554 STRESS_NOTREACHED(); | |
1555 cache_entry->SetDirtyFlag(0); | |
1556 // Don't remove this from the list (it is not linked properly). Instead, | |
1557 // break the link back to the entry because it is going away, and leave the | |
1558 // rankings node to be deleted if we find it through a list. | |
1559 rankings_.SetContents(cache_entry->rankings(), 0); | |
1560 } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) { | |
1561 STRESS_NOTREACHED(); | |
1562 cache_entry->SetDirtyFlag(0); | |
1563 rankings_.SetContents(cache_entry->rankings(), address.value()); | |
1564 } | |
1565 | |
1566 if (!cache_entry->DataSanityCheck()) { | |
1567 LOG(WARNING) << "Messed up entry found."; | |
1568 cache_entry->SetDirtyFlag(0); | |
1569 cache_entry->FixForDelete(); | |
1570 } | |
1571 | |
1572 // Prevent overwriting the dirty flag on the destructor. | |
1573 cache_entry->SetDirtyFlag(GetCurrentEntryId()); | |
1574 | |
1575 if (cache_entry->dirty()) { | |
1576 Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()), | |
1577 address.value()); | |
1578 } | |
1579 | |
1580 open_entries_[address.value()] = cache_entry.get(); | |
1581 | |
1582 cache_entry->BeginLogging(net_log_, false); | |
1583 cache_entry.swap(entry); | |
1584 return 0; | |
1585 } | |
1586 | |
1587 EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash, | |
1588 bool find_parent, Addr entry_addr, | |
1589 bool* match_error) { | |
1590 Addr address(data_->table[hash & mask_]); | |
1591 scoped_refptr<EntryImpl> cache_entry, parent_entry; | |
1592 EntryImpl* tmp = NULL; | |
1593 bool found = false; | |
1594 std::set<CacheAddr> visited; | |
1595 *match_error = false; | |
1596 | |
  for (;;) {
    if (disabled_)
      break;

    if (visited.find(address.value()) != visited.end()) {
      // It's possible for a buggy version of the code to write a loop. Just
      // break it.
      Trace("Hash collision loop 0x%x", address.value());
      address.set_value(0);
      parent_entry->SetNextAddress(address);
    }
    visited.insert(address.value());

    if (!address.is_initialized()) {
      if (find_parent)
        found = true;
      break;
    }

    int error = NewEntry(address, &tmp);
    cache_entry.swap(&tmp);

    if (error || cache_entry->dirty()) {
      // This entry is dirty on disk (it was not properly closed): we cannot
      // trust it.
      Addr child(0);
      if (!error)
        child.set_value(cache_entry->GetNextAddress());

      if (parent_entry.get()) {
        parent_entry->SetNextAddress(child);
        parent_entry = NULL;
      } else {
        data_->table[hash & mask_] = child.value();
      }

      Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
            address.value());

      if (!error) {
        // It is important to call DestroyInvalidEntry after removing this
        // entry from the table.
        DestroyInvalidEntry(cache_entry.get());
        cache_entry = NULL;
      } else {
        Trace("NewEntry failed on MatchEntry 0x%x", address.value());
      }

      // Restart the search.
      address.set_value(data_->table[hash & mask_]);
      visited.clear();
      continue;
    }

    DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
    if (cache_entry->IsSameEntry(key, hash)) {
      if (!cache_entry->Update())
        cache_entry = NULL;
      found = true;
      if (find_parent && entry_addr.value() != address.value()) {
        Trace("Entry not on the index 0x%x", address.value());
        *match_error = true;
        parent_entry = NULL;
      }
      break;
    }
    if (!cache_entry->Update())
      cache_entry = NULL;
    parent_entry = cache_entry;
    cache_entry = NULL;
    if (!parent_entry.get())
      break;

    address.set_value(parent_entry->GetNextAddress());
  }

  if (parent_entry.get() && (!find_parent || !found))
    parent_entry = NULL;

  if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
    *match_error = true;
    parent_entry = NULL;
  }

  if (cache_entry.get() && (find_parent || !found))
    cache_entry = NULL;

  find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
  FlushIndex();
  return tmp;
}

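// Moves an enumeration one step along |list|: releases the rankings node at
// |*from_entry|, opens the entry that follows it, and returns the new node
// through |from_entry|. Only the NO_USE list is available with the old
// eviction algorithm.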
bool BackendImpl::OpenFollowingEntryFromList(Rankings::List list,
                                             CacheRankingsBlock** from_entry,
                                             EntryImpl** next_entry) {
  if (disabled_)
    return false;

  if (!new_eviction_ && Rankings::NO_USE != list)
    return false;

  Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
  CacheRankingsBlock* next_block = rankings_.GetNext(rankings.get(), list);
  Rankings::ScopedRankingsBlock next(&rankings_, next_block);
  *from_entry = NULL;

  *next_entry = GetEnumeratedEntry(next.get(), list);
  if (!*next_entry)
    return false;

  *from_entry = next.release();
  return true;
}

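// Opens the entry referenced by the rankings node |next| for enumeration,
// repairing the list as a side effect when the node points to an invalid or
// untrustworthy entry.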
EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
                                           Rankings::List list) {
  if (!next || disabled_)
    return NULL;

  EntryImpl* entry;
  int rv = NewEntry(Addr(next->Data()->contents), &entry);
  if (rv) {
    STRESS_NOTREACHED();
    rankings_.Remove(next, list, false);
    if (rv == ERR_INVALID_ADDRESS) {
      // There is nothing linked from the index. Delete the rankings node.
      DeleteBlock(next->address(), true);
    }
    return NULL;
  }

  if (entry->dirty()) {
    // We cannot trust this entry.
    InternalDoomEntry(entry);
    entry->Release();
    return NULL;
  }

  if (!entry->Update()) {
    STRESS_NOTREACHED();
    entry->Release();
    return NULL;
  }

  // Note that it is unfortunate (but possible) for this entry to be clean, but
  // not actually the real entry. In other words, we could have lost this entry
  // from the index, and it could have been replaced with a newer one. It's not
  // worth checking that this entry is "the real one", so we just return it and
  // let the enumeration continue; this entry will be evicted at some point, and
  // the regular path will work with the real entry. With time, this problem
  // will disappear because this scenario is just a bug.

  // Make sure that we save the key for later.
  entry->GetKey();

  return entry;
}

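// Called during entry creation when the key matches an existing record. If
// that record is a live (normal) entry, the create simply fails; if it is a
// deleted entry kept around by the new eviction algorithm, its on-disk
// records are revived instead of building the entry from scratch.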
EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry->Release();
    stats_.OnEvent(Stats::CREATE_MISS);
    Trace("create entry miss ");
    return NULL;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.

  eviction_.OnCreateEntry(deleted_entry);
  entry_count_++;

  stats_.OnEvent(Stats::RESURRECT_HIT);
  Trace("Resurrect entry hit ");
  return deleted_entry;
}

void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";
  Trace("Destroying invalid entry 0x%p", entry);

  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (!new_eviction_)
    DecreaseNumEntries();
  stats_.OnEvent(Stats::INVALID_ENTRY);
}

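// Note that num_bytes is part of the mapped index header, so these updates
// are persisted along with the rest of the index.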
void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

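// A disabled backend waits for all external references to go away; dropping
// the last one is what triggers the deferred restart.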
void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

  if (!num_refs_ && disabled_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK_GT(data_->header.num_entries, 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    NOTREACHED();
    data_->header.num_entries = 0;
  }
}

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (size_t index = 0; index < stats.size(); index++)
    VLOG(1) << stats[index].first << ": " << stats[index].second;
}

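// Records UMA histograms describing the current state of the cache (size,
// hit ratio, error counts, etc.). Counters reported as totals since the last
// report are reset after being recorded.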
void BackendImpl::ReportStats() {
  CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);
  int hit_ratio_as_percentage = stats_.GetHitRatio();

  CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
  // For any bin in HitRatioBySize2, the hit ratio of caches of that size is
  // the ratio of that bin's total count to the count in the same bin in the
  // Size2 histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
  if (!max_size)
    max_size++;
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  int age = (Time::Now() -
             Time::FromInternalValue(data_->header.create_time)).InHours();
  if (age)
    CACHE_UMA(HOURS, "FilesAge", 0, age);

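  // Stats::TIMER advances once every 30 seconds, so dividing the tick count
  // by 120 converts it to hours.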
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!data_->header.create_time || !data_->header.lru.filled) {
    int cause = data_->header.create_time ? 0 : 1;
    if (!data_->header.lru.filled)
      cause |= 2;
    CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up to date client that will report FirstEviction() data. After
  // that event, start reporting this:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
  // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
  // time is the ratio of that bin's total count to the count in the same bin
  // in the TotalTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, static_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
  // is the ratio of that bin's total count to the count in the same bin in the
  // UseTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByUseTime", 0, static_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);

  CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
            data_->header.num_entries * 100 / (mask_ + 1));

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}

void BackendImpl::UpgradeTo2_1() {
  // 2.1 is basically the same as 2.0, except that new fields are actually
  // updated by the new eviction algorithm.
  DCHECK_EQ(0x20000, data_->header.version);
  data_->header.version = 0x20001;
  data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
}

bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

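  // table_len is expected to be a power of two (see DesiredIndexTableLen),
  // so table_len - 1 can serve as the bucket mask.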
  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory.
  return index_->Preload();
}

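// Walks every bucket chain in the index and sanity-checks each entry.
// Returns the number of dirty entries found, or an error code if an entry is
// broken or the totals don't match the header.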
int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch: counted " << num_entries
               << " clean and " << num_dirty << " dirty, but the header says "
               << data_->header.num_entries;
    DCHECK_LT(num_entries, data_->header.num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}

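// Returns true if every block-file address held by |cache_entry| (the entry
// block, its rankings node, and any data streams stored in block files) is
// valid, and the rankings node passes its hash check.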
bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
}

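// Returns the budget for in-memory buffering: 2% of physical memory, capped
// at 30 MB. For example, with 1 GB of RAM the budget is ~20 MB, while with
// 4 GB of RAM the 30 MB cap applies.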
int BackendImpl::MaxBuffersSize() {
  static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
  static bool done = false;

  if (!done) {
    const int kMaxBuffersSize = 30 * 1024 * 1024;

    // We want to use up to 2% of the computer's memory.
    total_memory = total_memory * 2 / 100;
    if (total_memory > kMaxBuffersSize || total_memory <= 0)
      total_memory = kMaxBuffersSize;

    done = true;
  }

  return static_cast<int>(total_memory);
}

}  // namespace disk_cache