| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "net/disk_cache/backend_impl.h" | |
| 6 | |
| 7 #include "base/bind.h" | |
| 8 #include "base/bind_helpers.h" | |
| 9 #include "base/file_util.h" | |
| 10 #include "base/files/file_path.h" | |
| 11 #include "base/hash.h" | |
| 12 #include "base/message_loop/message_loop.h" | |
| 13 #include "base/metrics/field_trial.h" | |
| 14 #include "base/metrics/histogram.h" | |
| 15 #include "base/metrics/stats_counters.h" | |
| 16 #include "base/rand_util.h" | |
| 17 #include "base/strings/string_util.h" | |
| 18 #include "base/strings/stringprintf.h" | |
| 19 #include "base/sys_info.h" | |
| 20 #include "base/threading/thread_restrictions.h" | |
| 21 #include "base/time/time.h" | |
| 22 #include "base/timer/timer.h" | |
| 23 #include "net/base/net_errors.h" | |
| 24 #include "net/disk_cache/cache_util.h" | |
| 25 #include "net/disk_cache/disk_format.h" | |
| 26 #include "net/disk_cache/entry_impl.h" | |
| 27 #include "net/disk_cache/errors.h" | |
| 28 #include "net/disk_cache/experiments.h" | |
| 29 #include "net/disk_cache/file.h" | |
| 30 | |
| 31 // This has to be defined before including histogram_macros.h from this file. | |
| 32 #define NET_DISK_CACHE_BACKEND_IMPL_CC_ | |
| 33 #include "net/disk_cache/histogram_macros.h" | |
| 34 | |
| 35 using base::Time; | |
| 36 using base::TimeDelta; | |
| 37 using base::TimeTicks; | |
| 38 | |
| 39 namespace { | |
| 40 | |
| 41 const char* kIndexName = "index"; | |
| 42 | |
| 43 // Seems like ~240 MB corresponds to fewer than 50k entries for 99% of users. | |
| 44 // Note that the actual target is to keep the index table load factor under 55% | |
| 45 // for most users. | |
| 46 const int k64kEntriesStore = 240 * 1000 * 1000; | |
| 47 const int kBaseTableLen = 64 * 1024; | |
| 48 | |
| 49 // Avoid trimming the cache for the first 5 minutes (10 timer ticks). | |
| 50 const int kTrimDelay = 10; | |
| 51 | |
| 52 int DesiredIndexTableLen(int32 storage_size) { | |
| 53 if (storage_size <= k64kEntriesStore) | |
| 54 return kBaseTableLen; | |
| 55 if (storage_size <= k64kEntriesStore * 2) | |
| 56 return kBaseTableLen * 2; | |
| 57 if (storage_size <= k64kEntriesStore * 4) | |
| 58 return kBaseTableLen * 4; | |
| 59 if (storage_size <= k64kEntriesStore * 8) | |
| 60 return kBaseTableLen * 8; | |
| 61 | |
| 62 // The biggest storage_size for int32 requires a 4 MB table. | |
| 63 return kBaseTableLen * 16; | |
| 64 } | |
| 65 | |
| 66 int MaxStorageSizeForTable(int table_len) { | |
| 67 return table_len * (k64kEntriesStore / kBaseTableLen); | |
| 68 } | |
| 69 | |
| 70 size_t GetIndexSize(int table_len) { | |
| 71 size_t table_size = sizeof(disk_cache::CacheAddr) * table_len; | |
| 72 return sizeof(disk_cache::IndexHeader) + table_size; | |
| 73 } | |
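| // For example, a 480 MB cache gets a 131072-bucket table, so GetIndexSize() | |
| // returns sizeof(IndexHeader) plus 512 KB worth of CacheAddr slots. | |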
| 74 | |
| 75 // ------------------------------------------------------------------------ | |
| 76 | |
| 77 // Sets the group for the current experiment. Returns false if the files should be | |
| 78 // discarded. | |
| 79 bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) { | |
| 80 if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 || | |
| 81 header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) { | |
| 82 // Discard current cache. | |
| 83 return false; | |
| 84 } | |
| 85 | |
| 86 if (base::FieldTrialList::FindFullName("SimpleCacheTrial") == | |
| 87 "ExperimentControl") { | |
| 88 if (cache_created) { | |
| 89 header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL; | |
| 90 return true; | |
| 91 } | |
| 92 return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL; | |
| 93 } | |
| 94 | |
| 95 header->experiment = disk_cache::NO_EXPERIMENT; | |
| 96 return true; | |
| 97 } | |
| 98 | |
| 99 // A callback to perform final cleanup on the background thread. | |
| 100 void FinalCleanupCallback(disk_cache::BackendImpl* backend) { | |
| 101 backend->CleanupCache(); | |
| 102 } | |
| 103 | |
| 104 } // namespace | |
| 105 | |
| 106 // ------------------------------------------------------------------------ | |
| 107 | |
| 108 namespace disk_cache { | |
| 109 | |
| 110 BackendImpl::BackendImpl(const base::FilePath& path, | |
| 111 base::MessageLoopProxy* cache_thread, | |
| 112 net::NetLog* net_log) | |
| 113 : background_queue_(this, cache_thread), | |
| 114 path_(path), | |
| 115 block_files_(path), | |
| 116 mask_(0), | |
| 117 max_size_(0), | |
| 118 up_ticks_(0), | |
| 119 cache_type_(net::DISK_CACHE), | |
| 120 uma_report_(0), | |
| 121 user_flags_(0), | |
| 122 init_(false), | |
| 123 restarted_(false), | |
| 124 unit_test_(false), | |
| 125 read_only_(false), | |
| 126 disabled_(false), | |
| 127 new_eviction_(false), | |
| 128 first_timer_(true), | |
| 129 user_load_(false), | |
| 130 net_log_(net_log), | |
| 131 done_(true, false), | |
| 132 ptr_factory_(this) { | |
| 133 } | |
| 134 | |
| 135 BackendImpl::BackendImpl(const base::FilePath& path, | |
| 136 uint32 mask, | |
| 137 base::MessageLoopProxy* cache_thread, | |
| 138 net::NetLog* net_log) | |
| 139 : background_queue_(this, cache_thread), | |
| 140 path_(path), | |
| 141 block_files_(path), | |
| 142 mask_(mask), | |
| 143 max_size_(0), | |
| 144 up_ticks_(0), | |
| 145 cache_type_(net::DISK_CACHE), | |
| 146 uma_report_(0), | |
| 147 user_flags_(kMask), | |
| 148 init_(false), | |
| 149 restarted_(false), | |
| 150 unit_test_(false), | |
| 151 read_only_(false), | |
| 152 disabled_(false), | |
| 153 new_eviction_(false), | |
| 154 first_timer_(true), | |
| 155 user_load_(false), | |
| 156 net_log_(net_log), | |
| 157 done_(true, false), | |
| 158 ptr_factory_(this) { | |
| 159 } | |
| 160 | |
| 161 BackendImpl::~BackendImpl() { | |
| 162 if (user_flags_ & kNoRandom) { | |
| 163 // This is a unit test, so we want to be strict about not leaking entries | |
| 164 // and completing all the work. | |
| 165 background_queue_.WaitForPendingIO(); | |
| 166 } else { | |
| 167 // This is most likely not a test, so we want to do as little work as | |
| 168 // possible at this time, at the price of leaving dirty entries behind. | |
| 169 background_queue_.DropPendingIO(); | |
| 170 } | |
| 171 | |
| 172 if (background_queue_.BackgroundIsCurrentThread()) { | |
| 173 // Unit tests may use the same thread for everything. | |
| 174 CleanupCache(); | |
| 175 } else { | |
| 176 background_queue_.background_thread()->PostTask( | |
| 177 FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this))); | |
| 178 // http://crbug.com/74623 | |
| 179 base::ThreadRestrictions::ScopedAllowWait allow_wait; | |
| 180 done_.Wait(); | |
| 181 } | |
| 182 } | |
| 183 | |
| 184 int BackendImpl::Init(const CompletionCallback& callback) { | |
| 185 background_queue_.Init(callback); | |
| 186 return net::ERR_IO_PENDING; | |
| 187 } | |
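| // A minimal usage sketch (illustrative only; names such as path, cache_thread | |
| // and the callbacks are placeholders, and error handling is omitted). The | |
| // backend is driven through the asynchronous interface: | |
| // | |
| //   disk_cache::BackendImpl* cache = | |
| //       new disk_cache::BackendImpl(path, cache_thread, net_log); | |
| //   cache->SetMaxSize(50 * 1024 * 1024); | |
| //   int rv = cache->Init(init_callback);  // Returns net::ERR_IO_PENDING. | |
| //   ... | |
| //   disk_cache::Entry* entry = NULL; | |
| //   rv = cache->CreateEntry("http://example.com/", &entry, create_callback); | |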
| 188 | |
| 189 int BackendImpl::SyncInit() { | |
| 190 #if defined(NET_BUILD_STRESS_CACHE) | |
| 191 // Start evictions right away. | |
| 192 up_ticks_ = kTrimDelay * 2; | |
| 193 #endif | |
| 194 DCHECK(!init_); | |
| 195 if (init_) | |
| 196 return net::ERR_FAILED; | |
| 197 | |
| 198 bool create_files = false; | |
| 199 if (!InitBackingStore(&create_files)) { | |
| 200 ReportError(ERR_STORAGE_ERROR); | |
| 201 return net::ERR_FAILED; | |
| 202 } | |
| 203 | |
| 204 num_refs_ = num_pending_io_ = max_refs_ = 0; | |
| 205 entry_count_ = byte_count_ = 0; | |
| 206 | |
| 207 bool should_create_timer = false; | |
| 208 if (!restarted_) { | |
| 209 buffer_bytes_ = 0; | |
| 210 trace_object_ = TraceObject::GetTraceObject(); | |
| 211 should_create_timer = true; | |
| 212 } | |
| 213 | |
| 214 init_ = true; | |
| 215 Trace("Init"); | |
| 216 | |
| 217 if (data_->header.experiment != NO_EXPERIMENT && | |
| 218 cache_type_ != net::DISK_CACHE) { | |
| 219 // No experiment for other caches. | |
| 220 return net::ERR_FAILED; | |
| 221 } | |
| 222 | |
| 223 if (!(user_flags_ & kNoRandom)) { | |
| 224 // The unit test directly controls what to test. | |
| 225 new_eviction_ = (cache_type_ == net::DISK_CACHE); | |
| 226 } | |
| 227 | |
| 228 if (!CheckIndex()) { | |
| 229 ReportError(ERR_INIT_FAILED); | |
| 230 return net::ERR_FAILED; | |
| 231 } | |
| 232 | |
| 233 if (!restarted_ && (create_files || !data_->header.num_entries)) | |
| 234 ReportError(ERR_CACHE_CREATED); | |
| 235 | |
| 236 if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE && | |
| 237 !InitExperiment(&data_->header, create_files)) { | |
| 238 return net::ERR_FAILED; | |
| 239 } | |
| 240 | |
| 241 // We don't care if the value overflows. The only thing we care about is that | |
| 242 // the id cannot be zero, because that value is used as "not dirty". | |
| 243 // Even if the value were increased once per second, it would take many years | |
| 244 // before collisions started to happen. | |
| 245 data_->header.this_id++; | |
| 246 if (!data_->header.this_id) | |
| 247 data_->header.this_id++; | |
| 248 | |
| 249 bool previous_crash = (data_->header.crash != 0); | |
| 250 data_->header.crash = 1; | |
| 251 | |
| 252 if (!block_files_.Init(create_files)) | |
| 253 return net::ERR_FAILED; | |
| 254 | |
| 255 // We want to minimize the changes to the cache for an AppCache. | |
| 256 if (cache_type() == net::APP_CACHE) { | |
| 257 DCHECK(!new_eviction_); | |
| 258 read_only_ = true; | |
| 259 } else if (cache_type() == net::SHADER_CACHE) { | |
| 260 DCHECK(!new_eviction_); | |
| 261 } | |
| 262 | |
| 263 eviction_.Init(this); | |
| 264 | |
| 265 // stats_ and rankings_ may end up calling back to us, so we had better be enabled. | |
| 266 disabled_ = false; | |
| 267 if (!InitStats()) | |
| 268 return net::ERR_FAILED; | |
| 269 | |
| 270 disabled_ = !rankings_.Init(this, new_eviction_); | |
| 271 | |
| 272 #if defined(STRESS_CACHE_EXTENDED_VALIDATION) | |
| 273 trace_object_->EnableTracing(false); | |
| 274 int sc = SelfCheck(); | |
| 275 if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH) | |
| 276 NOTREACHED(); | |
| 277 trace_object_->EnableTracing(true); | |
| 278 #endif | |
| 279 | |
| 280 if (previous_crash) { | |
| 281 ReportError(ERR_PREVIOUS_CRASH); | |
| 282 } else if (!restarted_) { | |
| 283 ReportError(ERR_NO_ERROR); | |
| 284 } | |
| 285 | |
| 286 FlushIndex(); | |
| 287 | |
| 288 if (!disabled_ && should_create_timer) { | |
| 289 // Create a recurring 30 second timer (1 second for unit tests). | |
| 290 int timer_delay = unit_test_ ? 1000 : 30000; | |
| 291 timer_.reset(new base::RepeatingTimer<BackendImpl>()); | |
| 292 timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this, | |
| 293 &BackendImpl::OnStatsTimer); | |
| 294 } | |
| 295 | |
| 296 return disabled_ ? net::ERR_FAILED : net::OK; | |
| 297 } | |
| 298 | |
| 299 void BackendImpl::CleanupCache() { | |
| 300 Trace("Backend Cleanup"); | |
| 301 eviction_.Stop(); | |
| 302 timer_.reset(); | |
| 303 | |
| 304 if (init_) { | |
| 305 StoreStats(); | |
| 306 if (data_) | |
| 307 data_->header.crash = 0; | |
| 308 | |
| 309 if (user_flags_ & kNoRandom) { | |
| 310 // This is a net_unittest, verify that we are not 'leaking' entries. | |
| 311 File::WaitForPendingIO(&num_pending_io_); | |
| 312 DCHECK(!num_refs_); | |
| 313 } else { | |
| 314 File::DropPendingIO(); | |
| 315 } | |
| 316 } | |
| 317 block_files_.CloseFiles(); | |
| 318 FlushIndex(); | |
| 319 index_ = NULL; | |
| 320 ptr_factory_.InvalidateWeakPtrs(); | |
| 321 done_.Signal(); | |
| 322 } | |
| 323 | |
| 324 // ------------------------------------------------------------------------ | |
| 325 | |
| 326 int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry, | |
| 327 const CompletionCallback& callback) { | |
| 328 DCHECK(!callback.is_null()); | |
| 329 background_queue_.OpenPrevEntry(iter, prev_entry, callback); | |
| 330 return net::ERR_IO_PENDING; | |
| 331 } | |
| 332 | |
| 333 int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) { | |
| 334 DCHECK(entry); | |
| 335 *entry = OpenEntryImpl(key); | |
| 336 return (*entry) ? net::OK : net::ERR_FAILED; | |
| 337 } | |
| 338 | |
| 339 int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) { | |
| 340 DCHECK(entry); | |
| 341 *entry = CreateEntryImpl(key); | |
| 342 return (*entry) ? net::OK : net::ERR_FAILED; | |
| 343 } | |
| 344 | |
| 345 int BackendImpl::SyncDoomEntry(const std::string& key) { | |
| 346 if (disabled_) | |
| 347 return net::ERR_FAILED; | |
| 348 | |
| 349 EntryImpl* entry = OpenEntryImpl(key); | |
| 350 if (!entry) | |
| 351 return net::ERR_FAILED; | |
| 352 | |
| 353 entry->DoomImpl(); | |
| 354 entry->Release(); | |
| 355 return net::OK; | |
| 356 } | |
| 357 | |
| 358 int BackendImpl::SyncDoomAllEntries() { | |
| 359 // This is not really an error, but it is an interesting condition. | |
| 360 ReportError(ERR_CACHE_DOOMED); | |
| 361 stats_.OnEvent(Stats::DOOM_CACHE); | |
| 362 if (!num_refs_) { | |
| 363 RestartCache(false); | |
| 364 return disabled_ ? net::ERR_FAILED : net::OK; | |
| 365 } else { | |
| 366 if (disabled_) | |
| 367 return net::ERR_FAILED; | |
| 368 | |
| 369 eviction_.TrimCache(true); | |
| 370 return net::OK; | |
| 371 } | |
| 372 } | |
| 373 | |
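| // Dooms every entry whose last-used time falls in [initial_time, end_time). | |
| // Enumeration returns entries from most to least recently used, so the loop | |
| // can stop as soon as it reaches an entry older than initial_time. | |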
| 374 int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time, | |
| 375 const base::Time end_time) { | |
| 376 DCHECK_NE(net::APP_CACHE, cache_type_); | |
| 377 if (end_time.is_null()) | |
| 378 return SyncDoomEntriesSince(initial_time); | |
| 379 | |
| 380 DCHECK(end_time >= initial_time); | |
| 381 | |
| 382 if (disabled_) | |
| 383 return net::ERR_FAILED; | |
| 384 | |
| 385 EntryImpl* node; | |
| 386 void* iter = NULL; | |
| 387 EntryImpl* next = OpenNextEntryImpl(&iter); | |
| 388 if (!next) | |
| 389 return net::OK; | |
| 390 | |
| 391 while (next) { | |
| 392 node = next; | |
| 393 next = OpenNextEntryImpl(&iter); | |
| 394 | |
| 395 if (node->GetLastUsed() >= initial_time && | |
| 396 node->GetLastUsed() < end_time) { | |
| 397 node->DoomImpl(); | |
| 398 } else if (node->GetLastUsed() < initial_time) { | |
| 399 if (next) | |
| 400 next->Release(); | |
| 401 next = NULL; | |
| 402 SyncEndEnumeration(iter); | |
| 403 } | |
| 404 | |
| 405 node->Release(); | |
| 406 } | |
| 407 | |
| 408 return net::OK; | |
| 409 } | |
| 410 | |
| 411 // We use OpenNextEntryImpl to retrieve elements from the cache, until we get | |
| 412 // entries that are too old. | |
| 413 int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) { | |
| 414 DCHECK_NE(net::APP_CACHE, cache_type_); | |
| 415 if (disabled_) | |
| 416 return net::ERR_FAILED; | |
| 417 | |
| 418 stats_.OnEvent(Stats::DOOM_RECENT); | |
| 419 for (;;) { | |
| 420 void* iter = NULL; | |
| 421 EntryImpl* entry = OpenNextEntryImpl(&iter); | |
| 422 if (!entry) | |
| 423 return net::OK; | |
| 424 | |
| 425 if (initial_time > entry->GetLastUsed()) { | |
| 426 entry->Release(); | |
| 427 SyncEndEnumeration(iter); | |
| 428 return net::OK; | |
| 429 } | |
| 430 | |
| 431 entry->DoomImpl(); | |
| 432 entry->Release(); | |
| 433 SyncEndEnumeration(iter); // Dooming the entry invalidates the iterator. | |
| 434 } | |
| 435 } | |
| 436 | |
| 437 int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) { | |
| 438 *next_entry = OpenNextEntryImpl(iter); | |
| 439 return (*next_entry) ? net::OK : net::ERR_FAILED; | |
| 440 } | |
| 441 | |
| 442 int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) { | |
| 443 *prev_entry = OpenPrevEntryImpl(iter); | |
| 444 return (*prev_entry) ? net::OK : net::ERR_FAILED; | |
| 445 } | |
| 446 | |
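| // Takes ownership of the enumeration state allocated by OpenFollowingEntry | |
| // and destroys it when the scoped_ptr goes out of scope. | |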
| 447 void BackendImpl::SyncEndEnumeration(void* iter) { | |
| 448 scoped_ptr<Rankings::Iterator> iterator( | |
| 449 reinterpret_cast<Rankings::Iterator*>(iter)); | |
| 450 } | |
| 451 | |
| 452 void BackendImpl::SyncOnExternalCacheHit(const std::string& key) { | |
| 453 if (disabled_) | |
| 454 return; | |
| 455 | |
| 456 uint32 hash = base::Hash(key); | |
| 457 bool error; | |
| 458 EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error); | |
| 459 if (cache_entry) { | |
| 460 if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) { | |
| 461 UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE); | |
| 462 } | |
| 463 cache_entry->Release(); | |
| 464 } | |
| 465 } | |
| 466 | |
| 467 EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) { | |
| 468 if (disabled_) | |
| 469 return NULL; | |
| 470 | |
| 471 TimeTicks start = TimeTicks::Now(); | |
| 472 uint32 hash = base::Hash(key); | |
| 473 Trace("Open hash 0x%x", hash); | |
| 474 | |
| 475 bool error; | |
| 476 EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error); | |
| 477 if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) { | |
| 478 // The entry was already evicted. | |
| 479 cache_entry->Release(); | |
| 480 cache_entry = NULL; | |
| 481 } | |
| 482 | |
| 483 int current_size = data_->header.num_bytes / (1024 * 1024); | |
| 484 int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120; | |
| 485 int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120; | |
| 486 int64 use_hours = total_hours - no_use_hours; | |
| 487 | |
| 488 if (!cache_entry) { | |
| 489 CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start); | |
| 490 CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size); | |
| 491 CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours); | |
| 492 CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours); | |
| 493 stats_.OnEvent(Stats::OPEN_MISS); | |
| 494 return NULL; | |
| 495 } | |
| 496 | |
| 497 eviction_.OnOpenEntry(cache_entry); | |
| 498 entry_count_++; | |
| 499 | |
| 500 Trace("Open hash 0x%x end: 0x%x", hash, | |
| 501 cache_entry->entry()->address().value()); | |
| 502 CACHE_UMA(AGE_MS, "OpenTime", 0, start); | |
| 503 CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size); | |
| 504 CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours); | |
| 505 CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours); | |
| 506 stats_.OnEvent(Stats::OPEN_HIT); | |
| 507 SIMPLE_STATS_COUNTER("disk_cache.hit"); | |
| 508 return cache_entry; | |
| 509 } | |
| 510 | |
| 511 EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) { | |
| 512 if (disabled_ || key.empty()) | |
| 513 return NULL; | |
| 514 | |
| 515 TimeTicks start = TimeTicks::Now(); | |
| 516 uint32 hash = base::Hash(key); | |
| 517 Trace("Create hash 0x%x", hash); | |
| 518 | |
| 519 scoped_refptr<EntryImpl> parent; | |
| 520 Addr entry_address(data_->table[hash & mask_]); | |
| 521 if (entry_address.is_initialized()) { | |
| 522 // We have an entry already. It could be the one we are looking for, or just | |
| 523 // a hash conflict. | |
| 524 bool error; | |
| 525 EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error); | |
| 526 if (old_entry) | |
| 527 return ResurrectEntry(old_entry); | |
| 528 | |
| 529 EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error); | |
| 530 DCHECK(!error); | |
| 531 if (parent_entry) { | |
| 532 parent.swap(&parent_entry); | |
| 533 } else if (data_->table[hash & mask_]) { | |
| 534 // We should have corrected the problem. | |
| 535 NOTREACHED(); | |
| 536 return NULL; | |
| 537 } | |
| 538 } | |
| 539 | |
| 540 // The general flow is to allocate disk space and initialize the entry data, | |
| 541 // followed by saving that to disk, then linking the entry through the index | |
| 542 // and finally through the lists. If there is a crash in this process, we may | |
| 543 // end up with: | |
| 544 // a. Used, unreferenced empty blocks on disk (basically just garbage). | |
| 545 // b. Used, unreferenced but meaningful data on disk (more garbage). | |
| 546 // c. A fully formed entry, reachable only through the index. | |
| 547 // d. A fully formed entry, also reachable through the lists, but still dirty. | |
| 548 // | |
| 549 // Anything after (b) can be automatically cleaned up. We may consider saving | |
| 550 // the current operation (as we do while manipulating the lists) so that we | |
| 551 // can detect and clean up (a) and (b). | |
| 552 | |
| 553 int num_blocks = EntryImpl::NumBlocksForEntry(key.size()); | |
| 554 if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) { | |
| 555 LOG(ERROR) << "Create entry failed " << key.c_str(); | |
| 556 stats_.OnEvent(Stats::CREATE_ERROR); | |
| 557 return NULL; | |
| 558 } | |
| 559 | |
| 560 Addr node_address(0); | |
| 561 if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) { | |
| 562 block_files_.DeleteBlock(entry_address, false); | |
| 563 LOG(ERROR) << "Create entry failed " << key.c_str(); | |
| 564 stats_.OnEvent(Stats::CREATE_ERROR); | |
| 565 return NULL; | |
| 566 } | |
| 567 | |
| 568 scoped_refptr<EntryImpl> cache_entry( | |
| 569 new EntryImpl(this, entry_address, false)); | |
| 570 IncreaseNumRefs(); | |
| 571 | |
| 572 if (!cache_entry->CreateEntry(node_address, key, hash)) { | |
| 573 block_files_.DeleteBlock(entry_address, false); | |
| 574 block_files_.DeleteBlock(node_address, false); | |
| 575 LOG(ERROR) << "Create entry failed " << key.c_str(); | |
| 576 stats_.OnEvent(Stats::CREATE_ERROR); | |
| 577 return NULL; | |
| 578 } | |
| 579 | |
| 580 cache_entry->BeginLogging(net_log_, true); | |
| 581 | |
| 582 // We are not failing the operation; let's add this to the map. | |
| 583 open_entries_[entry_address.value()] = cache_entry.get(); | |
| 584 | |
| 585 // Save the entry. | |
| 586 cache_entry->entry()->Store(); | |
| 587 cache_entry->rankings()->Store(); | |
| 588 IncreaseNumEntries(); | |
| 589 entry_count_++; | |
| 590 | |
| 591 // Link this entry through the index. | |
| 592 if (parent.get()) { | |
| 593 parent->SetNextAddress(entry_address); | |
| 594 } else { | |
| 595 data_->table[hash & mask_] = entry_address.value(); | |
| 596 } | |
| 597 | |
| 598 // Link this entry through the lists. | |
| 599 eviction_.OnCreateEntry(cache_entry.get()); | |
| 600 | |
| 601 CACHE_UMA(AGE_MS, "CreateTime", 0, start); | |
| 602 stats_.OnEvent(Stats::CREATE_HIT); | |
| 603 SIMPLE_STATS_COUNTER("disk_cache.miss"); | |
| 604 Trace("create entry hit "); | |
| 605 FlushIndex(); | |
| 606 cache_entry->AddRef(); | |
| 607 return cache_entry.get(); | |
| 608 } | |
| 609 | |
| 610 EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) { | |
| 611 return OpenFollowingEntry(true, iter); | |
| 612 } | |
| 613 | |
| 614 EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) { | |
| 615 return OpenFollowingEntry(false, iter); | |
| 616 } | |
| 617 | |
| 618 bool BackendImpl::SetMaxSize(int max_bytes) { | |
| 619 COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); | |
| 620 if (max_bytes < 0) | |
| 621 return false; | |
| 622 | |
| 623 // Zero size means use the default. | |
| 624 if (!max_bytes) | |
| 625 return true; | |
| 626 | |
| 627 // Avoid a DCHECK later on. | |
| 628 if (max_bytes >= kint32max - kint32max / 10) | |
| 629 max_bytes = kint32max - kint32max / 10 - 1; | |
| 630 | |
| 631 user_flags_ |= kMaxSize; | |
| 632 max_size_ = max_bytes; | |
| 633 return true; | |
| 634 } | |
| 635 | |
| 636 void BackendImpl::SetType(net::CacheType type) { | |
| 637 DCHECK_NE(net::MEMORY_CACHE, type); | |
| 638 cache_type_ = type; | |
| 639 } | |
| 640 | |
| 641 base::FilePath BackendImpl::GetFileName(Addr address) const { | |
| 642 if (!address.is_separate_file() || !address.is_initialized()) { | |
| 643 NOTREACHED(); | |
| 644 return base::FilePath(); | |
| 645 } | |
| 646 | |
| 647 std::string tmp = base::StringPrintf("f_%06x", address.FileNumber()); | |
| 648 return path_.AppendASCII(tmp); | |
| 649 } | |
| 650 | |
| 651 MappedFile* BackendImpl::File(Addr address) { | |
| 652 if (disabled_) | |
| 653 return NULL; | |
| 654 return block_files_.GetFile(address); | |
| 655 } | |
| 656 | |
| 657 base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() { | |
| 658 return background_queue_.GetWeakPtr(); | |
| 659 } | |
| 660 | |
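| // Picks the next unused external file number, creating the corresponding | |
| // f_xxxxxx file; the number wraps back to 1 if it no longer fits in an Addr. | |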
| 661 bool BackendImpl::CreateExternalFile(Addr* address) { | |
| 662 int file_number = data_->header.last_file + 1; | |
| 663 Addr file_address(0); | |
| 664 bool success = false; | |
| 665 for (int i = 0; i < 0x0fffffff; i++, file_number++) { | |
| 666 if (!file_address.SetFileNumber(file_number)) { | |
| 667 file_number = 1; | |
| 668 continue; | |
| 669 } | |
| 670 base::FilePath name = GetFileName(file_address); | |
| 671 int flags = base::PLATFORM_FILE_READ | | |
| 672 base::PLATFORM_FILE_WRITE | | |
| 673 base::PLATFORM_FILE_CREATE | | |
| 674 base::PLATFORM_FILE_EXCLUSIVE_WRITE; | |
| 675 base::PlatformFileError error; | |
| 676 scoped_refptr<disk_cache::File> file(new disk_cache::File( | |
| 677 base::CreatePlatformFile(name, flags, NULL, &error))); | |
| 678 if (!file->IsValid()) { | |
| 679 if (error != base::PLATFORM_FILE_ERROR_EXISTS) { | |
| 680 LOG(ERROR) << "Unable to create file: " << error; | |
| 681 return false; | |
| 682 } | |
| 683 continue; | |
| 684 } | |
| 685 | |
| 686 success = true; | |
| 687 break; | |
| 688 } | |
| 689 | |
| 690 DCHECK(success); | |
| 691 if (!success) | |
| 692 return false; | |
| 693 | |
| 694 data_->header.last_file = file_number; | |
| 695 address->set_value(file_address.value()); | |
| 696 return true; | |
| 697 } | |
| 698 | |
| 699 bool BackendImpl::CreateBlock(FileType block_type, int block_count, | |
| 700 Addr* block_address) { | |
| 701 return block_files_.CreateBlock(block_type, block_count, block_address); | |
| 702 } | |
| 703 | |
| 704 void BackendImpl::DeleteBlock(Addr block_address, bool deep) { | |
| 705 block_files_.DeleteBlock(block_address, deep); | |
| 706 } | |
| 707 | |
| 708 LruData* BackendImpl::GetLruData() { | |
| 709 return &data_->header.lru; | |
| 710 } | |
| 711 | |
| 712 void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) { | |
| 713 if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE)) | |
| 714 return; | |
| 715 eviction_.UpdateRank(entry, modified); | |
| 716 } | |
| 717 | |
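| // Puts an entry found through the rankings lists back on the hash table, in | |
| // case the index was not updated before a crash. | |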
| 718 void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) { | |
| 719 Addr address(rankings->Data()->contents); | |
| 720 EntryImpl* cache_entry = NULL; | |
| 721 if (NewEntry(address, &cache_entry)) { | |
| 722 STRESS_NOTREACHED(); | |
| 723 return; | |
| 724 } | |
| 725 | |
| 726 uint32 hash = cache_entry->GetHash(); | |
| 727 cache_entry->Release(); | |
| 728 | |
| 729 // Anything on the table means that this entry is there. | |
| 730 if (data_->table[hash & mask_]) | |
| 731 return; | |
| 732 | |
| 733 data_->table[hash & mask_] = address.value(); | |
| 734 FlushIndex(); | |
| 735 } | |
| 736 | |
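| // Unlinks the entry from its hash chain (fixing up either the parent entry | |
| // or the table slot) and notifies the eviction code if it was not already | |
| // doomed. | |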
| 737 void BackendImpl::InternalDoomEntry(EntryImpl* entry) { | |
| 738 uint32 hash = entry->GetHash(); | |
| 739 std::string key = entry->GetKey(); | |
| 740 Addr entry_addr = entry->entry()->address(); | |
| 741 bool error; | |
| 742 EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error); | |
| 743 CacheAddr child(entry->GetNextAddress()); | |
| 744 | |
| 745 Trace("Doom entry 0x%p", entry); | |
| 746 | |
| 747 if (!entry->doomed()) { | |
| 748 // We may have doomed this entry from within MatchEntry. | |
| 749 eviction_.OnDoomEntry(entry); | |
| 750 entry->InternalDoom(); | |
| 751 if (!new_eviction_) { | |
| 752 DecreaseNumEntries(); | |
| 753 } | |
| 754 stats_.OnEvent(Stats::DOOM_ENTRY); | |
| 755 } | |
| 756 | |
| 757 if (parent_entry) { | |
| 758 parent_entry->SetNextAddress(Addr(child)); | |
| 759 parent_entry->Release(); | |
| 760 } else if (!error) { | |
| 761 data_->table[hash & mask_] = child; | |
| 762 } | |
| 763 | |
| 764 FlushIndex(); | |
| 765 } | |
| 766 | |
| 767 #if defined(NET_BUILD_STRESS_CACHE) | |
| 768 | |
| 769 CacheAddr BackendImpl::GetNextAddr(Addr address) { | |
| 770 EntriesMap::iterator it = open_entries_.find(address.value()); | |
| 771 if (it != open_entries_.end()) { | |
| 772 EntryImpl* this_entry = it->second; | |
| 773 return this_entry->GetNextAddress(); | |
| 774 } | |
| 775 DCHECK(block_files_.IsValid(address)); | |
| 776 DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256); | |
| 777 | |
| 778 CacheEntryBlock entry(File(address), address); | |
| 779 CHECK(entry.Load()); | |
| 780 return entry.Data()->next; | |
| 781 } | |
| 782 | |
| 783 void BackendImpl::NotLinked(EntryImpl* entry) { | |
| 784 Addr entry_addr = entry->entry()->address(); | |
| 785 uint32 i = entry->GetHash() & mask_; | |
| 786 Addr address(data_->table[i]); | |
| 787 if (!address.is_initialized()) | |
| 788 return; | |
| 789 | |
| 790 for (;;) { | |
| 791 DCHECK(entry_addr.value() != address.value()); | |
| 792 address.set_value(GetNextAddr(address)); | |
| 793 if (!address.is_initialized()) | |
| 794 break; | |
| 795 } | |
| 796 } | |
| 797 #endif // NET_BUILD_STRESS_CACHE | |
| 798 | |
| 799 // An entry may be linked on the DELETED list for a while after being doomed. | |
| 800 // This function is called when we want to remove it. | |
| 801 void BackendImpl::RemoveEntry(EntryImpl* entry) { | |
| 802 #if defined(NET_BUILD_STRESS_CACHE) | |
| 803 NotLinked(entry); | |
| 804 #endif | |
| 805 if (!new_eviction_) | |
| 806 return; | |
| 807 | |
| 808 DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state); | |
| 809 | |
| 810 Trace("Remove entry 0x%p", entry); | |
| 811 eviction_.OnDestroyEntry(entry); | |
| 812 DecreaseNumEntries(); | |
| 813 } | |
| 814 | |
| 815 void BackendImpl::OnEntryDestroyBegin(Addr address) { | |
| 816 EntriesMap::iterator it = open_entries_.find(address.value()); | |
| 817 if (it != open_entries_.end()) | |
| 818 open_entries_.erase(it); | |
| 819 } | |
| 820 | |
| 821 void BackendImpl::OnEntryDestroyEnd() { | |
| 822 DecreaseNumRefs(); | |
| 823 if (data_->header.num_bytes > max_size_ && !read_only_ && | |
| 824 (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom)) | |
| 825 eviction_.TrimCache(false); | |
| 826 } | |
| 827 | |
| 828 EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const { | |
| 829 DCHECK(rankings->HasData()); | |
| 830 EntriesMap::const_iterator it = | |
| 831 open_entries_.find(rankings->Data()->contents); | |
| 832 if (it != open_entries_.end()) { | |
| 833 // We have this entry in memory. | |
| 834 return it->second; | |
| 835 } | |
| 836 | |
| 837 return NULL; | |
| 838 } | |
| 839 | |
| 840 int32 BackendImpl::GetCurrentEntryId() const { | |
| 841 return data_->header.this_id; | |
| 842 } | |
| 843 | |
| 844 int BackendImpl::MaxFileSize() const { | |
| 845 return cache_type() == net::PNACL_CACHE ? max_size_ : max_size_ / 8; | |
| 846 } | |
| 847 | |
| 848 void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) { | |
| 849 if (disabled_ || old_size == new_size) | |
| 850 return; | |
| 851 if (old_size > new_size) | |
| 852 SubstractStorageSize(old_size - new_size); | |
| 853 else | |
| 854 AddStorageSize(new_size - old_size); | |
| 855 | |
| 856 FlushIndex(); | |
| 857 | |
| 858 // Update the usage statistics. | |
| 859 stats_.ModifyStorageStats(old_size, new_size); | |
| 860 } | |
| 861 | |
| 862 void BackendImpl::TooMuchStorageRequested(int32 size) { | |
| 863 stats_.ModifyStorageStats(0, size); | |
| 864 } | |
| 865 | |
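| // Grants or denies (new_size - current_size) bytes of in-memory buffering | |
| // for user data, keeping the global total under MaxBuffersSize(). | |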
| 866 bool BackendImpl::IsAllocAllowed(int current_size, int new_size) { | |
| 867 DCHECK_GT(new_size, current_size); | |
| 868 if (user_flags_ & kNoBuffering) | |
| 869 return false; | |
| 870 | |
| 871 int to_add = new_size - current_size; | |
| 872 if (buffer_bytes_ + to_add > MaxBuffersSize()) | |
| 873 return false; | |
| 874 | |
| 875 buffer_bytes_ += to_add; | |
| 876 CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024); | |
| 877 return true; | |
| 878 } | |
| 879 | |
| 880 void BackendImpl::BufferDeleted(int size) { | |
| 881 buffer_bytes_ -= size; | |
| 882 DCHECK_GE(size, 0); | |
| 883 } | |
| 884 | |
| 885 bool BackendImpl::IsLoaded() const { | |
| 886 CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_); | |
| 887 if (user_flags_ & kNoLoadProtection) | |
| 888 return false; | |
| 889 | |
| 890 return (num_pending_io_ > 5 || user_load_); | |
| 891 } | |
| 892 | |
| 893 std::string BackendImpl::HistogramName(const char* name, int experiment) const { | |
| 894 if (!experiment) | |
| 895 return base::StringPrintf("DiskCache.%d.%s", cache_type_, name); | |
| 896 return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_, | |
| 897 name, experiment); | |
| 898 } | |
| 899 | |
| 900 base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() { | |
| 901 return ptr_factory_.GetWeakPtr(); | |
| 902 } | |
| 903 | |
| 904 // We want to remove biases from some histograms so we only send data once per | |
| 905 // week. | |
| 906 bool BackendImpl::ShouldReportAgain() { | |
| 907 if (uma_report_) | |
| 908 return uma_report_ == 2; | |
| 909 | |
| 910 uma_report_++; | |
| 911 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); | |
| 912 Time last_time = Time::FromInternalValue(last_report); | |
| 913 if (!last_report || (Time::Now() - last_time).InDays() >= 7) { | |
| 914 stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue()); | |
| 915 uma_report_++; | |
| 916 return true; | |
| 917 } | |
| 918 return false; | |
| 919 } | |
| 920 | |
| 921 void BackendImpl::FirstEviction() { | |
| 922 DCHECK(data_->header.create_time); | |
| 923 if (!GetEntryCount()) | |
| 924 return; // This is just for unit tests. | |
| 925 | |
| 926 Time create_time = Time::FromInternalValue(data_->header.create_time); | |
| 927 CACHE_UMA(AGE, "FillupAge", 0, create_time); | |
| 928 | |
| 929 int64 use_time = stats_.GetCounter(Stats::TIMER); | |
| 930 CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120)); | |
| 931 CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio()); | |
| 932 | |
| 933 if (!use_time) | |
| 934 use_time = 1; | |
| 935 CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0, | |
| 936 static_cast<int>(data_->header.num_entries / use_time)); | |
| 937 CACHE_UMA(COUNTS, "FirstByteIORate", 0, | |
| 938 static_cast<int>((data_->header.num_bytes / 1024) / use_time)); | |
| 939 | |
| 940 int avg_size = data_->header.num_bytes / GetEntryCount(); | |
| 941 CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size); | |
| 942 | |
| 943 int large_entries_bytes = stats_.GetLargeEntriesSize(); | |
| 944 int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes; | |
| 945 CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio); | |
| 946 | |
| 947 if (new_eviction_) { | |
| 948 CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio()); | |
| 949 CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0, | |
| 950 data_->header.lru.sizes[0] * 100 / data_->header.num_entries); | |
| 951 CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0, | |
| 952 data_->header.lru.sizes[1] * 100 / data_->header.num_entries); | |
| 953 CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0, | |
| 954 data_->header.lru.sizes[2] * 100 / data_->header.num_entries); | |
| 955 } | |
| 956 | |
| 957 stats_.ResetRatios(); | |
| 958 } | |
| 959 | |
| 960 void BackendImpl::CriticalError(int error) { | |
| 961 STRESS_NOTREACHED(); | |
| 962 LOG(ERROR) << "Critical error found " << error; | |
| 963 if (disabled_) | |
| 964 return; | |
| 965 | |
| 966 stats_.OnEvent(Stats::FATAL_ERROR); | |
| 967 LogStats(); | |
| 968 ReportError(error); | |
| 969 | |
| 970 // Setting the index table length to an invalid value will force re-creation | |
| 971 // of the cache files. | |
| 972 data_->header.table_len = 1; | |
| 973 disabled_ = true; | |
| 974 | |
| 975 if (!num_refs_) | |
| 976 base::MessageLoop::current()->PostTask( | |
| 977 FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true)); | |
| 978 } | |
| 979 | |
| 980 void BackendImpl::ReportError(int error) { | |
| 981 STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH || | |
| 982 error == ERR_CACHE_CREATED); | |
| 983 | |
| 984 // We transmit positive numbers, instead of direct error codes. | |
| 985 DCHECK_LE(error, 0); | |
| 986 CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1); | |
| 987 } | |
| 988 | |
| 989 void BackendImpl::OnEvent(Stats::Counters an_event) { | |
| 990 stats_.OnEvent(an_event); | |
| 991 } | |
| 992 | |
| 993 void BackendImpl::OnRead(int32 bytes) { | |
| 994 DCHECK_GE(bytes, 0); | |
| 995 byte_count_ += bytes; | |
| 996 if (byte_count_ < 0) | |
| 997 byte_count_ = kint32max; | |
| 998 } | |
| 999 | |
| 1000 void BackendImpl::OnWrite(int32 bytes) { | |
| 1001 // We use the same implementation as OnRead... just log the number of bytes. | |
| 1002 OnRead(bytes); | |
| 1003 } | |
| 1004 | |
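| // Runs every 30 seconds (every second for unit tests) to sample usage | |
| // statistics; every ten ticks the stats are also flushed to disk. | |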
| 1005 void BackendImpl::OnStatsTimer() { | |
| 1006 if (disabled_) | |
| 1007 return; | |
| 1008 | |
| 1009 stats_.OnEvent(Stats::TIMER); | |
| 1010 int64 time = stats_.GetCounter(Stats::TIMER); | |
| 1011 int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES); | |
| 1012 | |
| 1013 // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding | |
| 1014 // the bias towards 0. | |
| 1015 if (num_refs_ && (current != num_refs_)) { | |
| 1016 int64 diff = (num_refs_ - current) / 50; | |
| 1017 if (!diff) | |
| 1018 diff = num_refs_ > current ? 1 : -1; | |
| 1019 current = current + diff; | |
| 1020 stats_.SetCounter(Stats::OPEN_ENTRIES, current); | |
| 1021 stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_); | |
| 1022 } | |
| 1023 | |
| 1024 CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_); | |
| 1025 | |
| 1026 CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_); | |
| 1027 CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024); | |
| 1028 | |
| 1029 // These values cover about 99.5% of the population (Oct 2011). | |
| 1030 user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024); | |
| 1031 entry_count_ = 0; | |
| 1032 byte_count_ = 0; | |
| 1033 up_ticks_++; | |
| 1034 | |
| 1035 if (!data_) | |
| 1036 first_timer_ = false; | |
| 1037 if (first_timer_) { | |
| 1038 first_timer_ = false; | |
| 1039 if (ShouldReportAgain()) | |
| 1040 ReportStats(); | |
| 1041 } | |
| 1042 | |
| 1043 // Save stats to disk at 5 min intervals. | |
| 1044 if (time % 10 == 0) | |
| 1045 StoreStats(); | |
| 1046 } | |
| 1047 | |
| 1048 void BackendImpl::IncrementIoCount() { | |
| 1049 num_pending_io_++; | |
| 1050 } | |
| 1051 | |
| 1052 void BackendImpl::DecrementIoCount() { | |
| 1053 num_pending_io_--; | |
| 1054 } | |
| 1055 | |
| 1056 void BackendImpl::SetUnitTestMode() { | |
| 1057 user_flags_ |= kUnitTestMode; | |
| 1058 unit_test_ = true; | |
| 1059 } | |
| 1060 | |
| 1061 void BackendImpl::SetUpgradeMode() { | |
| 1062 user_flags_ |= kUpgradeMode; | |
| 1063 read_only_ = true; | |
| 1064 } | |
| 1065 | |
| 1066 void BackendImpl::SetNewEviction() { | |
| 1067 user_flags_ |= kNewEviction; | |
| 1068 new_eviction_ = true; | |
| 1069 } | |
| 1070 | |
| 1071 void BackendImpl::SetFlags(uint32 flags) { | |
| 1072 user_flags_ |= flags; | |
| 1073 } | |
| 1074 | |
| 1075 void BackendImpl::ClearRefCountForTest() { | |
| 1076 num_refs_ = 0; | |
| 1077 } | |
| 1078 | |
| 1079 int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) { | |
| 1080 background_queue_.FlushQueue(callback); | |
| 1081 return net::ERR_IO_PENDING; | |
| 1082 } | |
| 1083 | |
| 1084 int BackendImpl::RunTaskForTest(const base::Closure& task, | |
| 1085 const CompletionCallback& callback) { | |
| 1086 background_queue_.RunTask(task, callback); | |
| 1087 return net::ERR_IO_PENDING; | |
| 1088 } | |
| 1089 | |
| 1090 void BackendImpl::TrimForTest(bool empty) { | |
| 1091 eviction_.SetTestMode(); | |
| 1092 eviction_.TrimCache(empty); | |
| 1093 } | |
| 1094 | |
| 1095 void BackendImpl::TrimDeletedListForTest(bool empty) { | |
| 1096 eviction_.SetTestMode(); | |
| 1097 eviction_.TrimDeletedList(empty); | |
| 1098 } | |
| 1099 | |
| 1100 base::RepeatingTimer<BackendImpl>* BackendImpl::GetTimerForTest() { | |
| 1101 return timer_.get(); | |
| 1102 } | |
| 1103 | |
| 1104 int BackendImpl::SelfCheck() { | |
| 1105 if (!init_) { | |
| 1106 LOG(ERROR) << "Init failed"; | |
| 1107 return ERR_INIT_FAILED; | |
| 1108 } | |
| 1109 | |
| 1110 int num_entries = rankings_.SelfCheck(); | |
| 1111 if (num_entries < 0) { | |
| 1112 LOG(ERROR) << "Invalid rankings list, error " << num_entries; | |
| 1113 #if !defined(NET_BUILD_STRESS_CACHE) | |
| 1114 return num_entries; | |
| 1115 #endif | |
| 1116 } | |
| 1117 | |
| 1118 if (num_entries != data_->header.num_entries) { | |
| 1119 LOG(ERROR) << "Number of entries mismatch"; | |
| 1120 #if !defined(NET_BUILD_STRESS_CACHE) | |
| 1121 return ERR_NUM_ENTRIES_MISMATCH; | |
| 1122 #endif | |
| 1123 } | |
| 1124 | |
| 1125 return CheckAllEntries(); | |
| 1126 } | |
| 1127 | |
| 1128 void BackendImpl::FlushIndex() { | |
| 1129 if (index_.get() && !disabled_) | |
| 1130 index_->Flush(); | |
| 1131 } | |
| 1132 | |
| 1133 // ------------------------------------------------------------------------ | |
| 1134 | |
| 1135 net::CacheType BackendImpl::GetCacheType() const { | |
| 1136 return cache_type_; | |
| 1137 } | |
| 1138 | |
| 1139 int32 BackendImpl::GetEntryCount() const { | |
| 1140 if (!index_.get() || disabled_) | |
| 1141 return 0; | |
| 1142 // num_entries includes entries already evicted. | |
| 1143 int32 not_deleted = data_->header.num_entries - | |
| 1144 data_->header.lru.sizes[Rankings::DELETED]; | |
| 1145 | |
| 1146 if (not_deleted < 0) { | |
| 1147 NOTREACHED(); | |
| 1148 not_deleted = 0; | |
| 1149 } | |
| 1150 | |
| 1151 return not_deleted; | |
| 1152 } | |
| 1153 | |
| 1154 int BackendImpl::OpenEntry(const std::string& key, Entry** entry, | |
| 1155 const CompletionCallback& callback) { | |
| 1156 DCHECK(!callback.is_null()); | |
| 1157 background_queue_.OpenEntry(key, entry, callback); | |
| 1158 return net::ERR_IO_PENDING; | |
| 1159 } | |
| 1160 | |
| 1161 int BackendImpl::CreateEntry(const std::string& key, Entry** entry, | |
| 1162 const CompletionCallback& callback) { | |
| 1163 DCHECK(!callback.is_null()); | |
| 1164 background_queue_.CreateEntry(key, entry, callback); | |
| 1165 return net::ERR_IO_PENDING; | |
| 1166 } | |
| 1167 | |
| 1168 int BackendImpl::DoomEntry(const std::string& key, | |
| 1169 const CompletionCallback& callback) { | |
| 1170 DCHECK(!callback.is_null()); | |
| 1171 background_queue_.DoomEntry(key, callback); | |
| 1172 return net::ERR_IO_PENDING; | |
| 1173 } | |
| 1174 | |
| 1175 int BackendImpl::DoomAllEntries(const CompletionCallback& callback) { | |
| 1176 DCHECK(!callback.is_null()); | |
| 1177 background_queue_.DoomAllEntries(callback); | |
| 1178 return net::ERR_IO_PENDING; | |
| 1179 } | |
| 1180 | |
| 1181 int BackendImpl::DoomEntriesBetween(const base::Time initial_time, | |
| 1182 const base::Time end_time, | |
| 1183 const CompletionCallback& callback) { | |
| 1184 DCHECK(!callback.is_null()); | |
| 1185 background_queue_.DoomEntriesBetween(initial_time, end_time, callback); | |
| 1186 return net::ERR_IO_PENDING; | |
| 1187 } | |
| 1188 | |
| 1189 int BackendImpl::DoomEntriesSince(const base::Time initial_time, | |
| 1190 const CompletionCallback& callback) { | |
| 1191 DCHECK(!callback.is_null()); | |
| 1192 background_queue_.DoomEntriesSince(initial_time, callback); | |
| 1193 return net::ERR_IO_PENDING; | |
| 1194 } | |
| 1195 | |
| 1196 int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry, | |
| 1197 const CompletionCallback& callback) { | |
| 1198 DCHECK(!callback.is_null()); | |
| 1199 background_queue_.OpenNextEntry(iter, next_entry, callback); | |
| 1200 return net::ERR_IO_PENDING; | |
| 1201 } | |
| 1202 | |
| 1203 void BackendImpl::EndEnumeration(void** iter) { | |
| 1204 background_queue_.EndEnumeration(*iter); | |
| 1205 *iter = NULL; | |
| 1206 } | |
| 1207 | |
| 1208 void BackendImpl::GetStats(StatsItems* stats) { | |
| 1209 if (disabled_) | |
| 1210 return; | |
| 1211 | |
| 1212 std::pair<std::string, std::string> item; | |
| 1213 | |
| 1214 item.first = "Entries"; | |
| 1215 item.second = base::StringPrintf("%d", data_->header.num_entries); | |
| 1216 stats->push_back(item); | |
| 1217 | |
| 1218 item.first = "Pending IO"; | |
| 1219 item.second = base::StringPrintf("%d", num_pending_io_); | |
| 1220 stats->push_back(item); | |
| 1221 | |
| 1222 item.first = "Max size"; | |
| 1223 item.second = base::StringPrintf("%d", max_size_); | |
| 1224 stats->push_back(item); | |
| 1225 | |
| 1226 item.first = "Current size"; | |
| 1227 item.second = base::StringPrintf("%d", data_->header.num_bytes); | |
| 1228 stats->push_back(item); | |
| 1229 | |
| 1230 item.first = "Cache type"; | |
| 1231 item.second = "Blockfile Cache"; | |
| 1232 stats->push_back(item); | |
| 1233 | |
| 1234 stats_.GetItems(stats); | |
| 1235 } | |
| 1236 | |
| 1237 void BackendImpl::OnExternalCacheHit(const std::string& key) { | |
| 1238 background_queue_.OnExternalCacheHit(key); | |
| 1239 } | |
| 1240 | |
| 1241 // ------------------------------------------------------------------------ | |
| 1242 | |
| 1243 // We just created a new file so we're going to write the header and set the | |
| 1244 // file length to include the hash table (zero filled). | |
| 1245 bool BackendImpl::CreateBackingStore(disk_cache::File* file) { | |
| 1246 AdjustMaxCacheSize(0); | |
| 1247 | |
| 1248 IndexHeader header; | |
| 1249 header.table_len = DesiredIndexTableLen(max_size_); | |
| 1250 | |
| 1251 // We need file version 2.1 for the new eviction algorithm. | |
| 1252 if (new_eviction_) | |
| 1253 header.version = 0x20001; | |
| 1254 | |
| 1255 header.create_time = Time::Now().ToInternalValue(); | |
| 1256 | |
| 1257 if (!file->Write(&header, sizeof(header), 0)) | |
| 1258 return false; | |
| 1259 | |
| 1260 return file->SetLength(GetIndexSize(header.table_len)); | |
| 1261 } | |
| 1262 | |
| 1263 bool BackendImpl::InitBackingStore(bool* file_created) { | |
| 1264 if (!base::CreateDirectory(path_)) | |
| 1265 return false; | |
| 1266 | |
| 1267 base::FilePath index_name = path_.AppendASCII(kIndexName); | |
| 1268 | |
| 1269 int flags = base::PLATFORM_FILE_READ | | |
| 1270 base::PLATFORM_FILE_WRITE | | |
| 1271 base::PLATFORM_FILE_OPEN_ALWAYS | | |
| 1272 base::PLATFORM_FILE_EXCLUSIVE_WRITE; | |
| 1273 scoped_refptr<disk_cache::File> file(new disk_cache::File( | |
| 1274 base::CreatePlatformFile(index_name, flags, file_created, NULL))); | |
| 1275 | |
| 1276 if (!file->IsValid()) | |
| 1277 return false; | |
| 1278 | |
| 1279 bool ret = true; | |
| 1280 if (*file_created) | |
| 1281 ret = CreateBackingStore(file.get()); | |
| 1282 | |
| 1283 file = NULL; | |
| 1284 if (!ret) | |
| 1285 return false; | |
| 1286 | |
| 1287 index_ = new MappedFile(); | |
| 1288 data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0)); | |
| 1289 if (!data_) { | |
| 1290 LOG(ERROR) << "Unable to map Index file"; | |
| 1291 return false; | |
| 1292 } | |
| 1293 | |
| 1294 if (index_->GetLength() < sizeof(Index)) { | |
| 1295 // We verify this again in CheckIndex(), but it's easier to make sure now | |
| 1296 // that the header is there. | |
| 1297 LOG(ERROR) << "Corrupt Index file"; | |
| 1298 return false; | |
| 1299 } | |
| 1300 | |
| 1301 return true; | |
| 1302 } | |
| 1303 | |
| 1304 // The maximum cache size will be either set explicitly by the caller, or | |
| 1305 // calculated by this code. | |
| 1306 void BackendImpl::AdjustMaxCacheSize(int table_len) { | |
| 1307 if (max_size_) | |
| 1308 return; | |
| 1309 | |
| 1310 // If table_len is provided, the index file exists. | |
| 1311 DCHECK(!table_len || data_->header.magic); | |
| 1312 | |
| 1313 // The user is not setting the size; let's figure it out. | |
| 1314 int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_); | |
| 1315 if (available < 0) { | |
| 1316 max_size_ = kDefaultCacheSize; | |
| 1317 return; | |
| 1318 } | |
| 1319 | |
| 1320 if (table_len) | |
| 1321 available += data_->header.num_bytes; | |
| 1322 | |
| 1323 max_size_ = PreferredCacheSize(available); | |
| 1324 | |
| 1325 if (!table_len) | |
| 1326 return; | |
| 1327 | |
| 1328 // If we already have a table, adjust the size to it. | |
| 1329 int current_max_size = MaxStorageSizeForTable(table_len); | |
| 1330 if (max_size_ > current_max_size) | |
| 1331 max_size_= current_max_size; | |
| 1332 } | |
| 1333 | |
| 1334 bool BackendImpl::InitStats() { | |
| 1335 Addr address(data_->header.stats); | |
| 1336 int size = stats_.StorageSize(); | |
| 1337 | |
| 1338 if (!address.is_initialized()) { | |
| 1339 FileType file_type = Addr::RequiredFileType(size); | |
| 1340 DCHECK_NE(file_type, EXTERNAL); | |
| 1341 int num_blocks = Addr::RequiredBlocks(size, file_type); | |
| 1342 | |
| 1343 if (!CreateBlock(file_type, num_blocks, &address)) | |
| 1344 return false; | |
| 1345 | |
| 1346 data_->header.stats = address.value(); | |
| 1347 return stats_.Init(NULL, 0, address); | |
| 1348 } | |
| 1349 | |
| 1350 if (!address.is_block_file()) { | |
| 1351 NOTREACHED(); | |
| 1352 return false; | |
| 1353 } | |
| 1354 | |
| 1355 // Load the required data. | |
| 1356 size = address.num_blocks() * address.BlockSize(); | |
| 1357 MappedFile* file = File(address); | |
| 1358 if (!file) | |
| 1359 return false; | |
| 1360 | |
| 1361 scoped_ptr<char[]> data(new char[size]); | |
| 1362 size_t offset = address.start_block() * address.BlockSize() + | |
| 1363 kBlockHeaderSize; | |
| 1364 if (!file->Read(data.get(), size, offset)) | |
| 1365 return false; | |
| 1366 | |
| 1367 if (!stats_.Init(data.get(), size, address)) | |
| 1368 return false; | |
| 1369 if (cache_type_ == net::DISK_CACHE && ShouldReportAgain()) | |
| 1370 stats_.InitSizeHistogram(); | |
| 1371 return true; | |
| 1372 } | |
| 1373 | |
| 1374 void BackendImpl::StoreStats() { | |
| 1375 int size = stats_.StorageSize(); | |
| 1376 scoped_ptr<char[]> data(new char[size]); | |
| 1377 Addr address; | |
| 1378 size = stats_.SerializeStats(data.get(), size, &address); | |
| 1379 DCHECK(size); | |
| 1380 if (!address.is_initialized()) | |
| 1381 return; | |
| 1382 | |
| 1383 MappedFile* file = File(address); | |
| 1384 if (!file) | |
| 1385 return; | |
| 1386 | |
| 1387 size_t offset = address.start_block() * address.BlockSize() + | |
| 1388 kBlockHeaderSize; | |
| 1389 file->Write(data.get(), size, offset); // ignore result. | |
| 1390 } | |
| 1391 | |
| 1392 void BackendImpl::RestartCache(bool failure) { | |
| 1393 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); | |
| 1394 int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE); | |
| 1395 int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT); | |
| 1396 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); | |
| 1397 | |
| 1398 PrepareForRestart(); | |
| 1399 if (failure) { | |
| 1400 DCHECK(!num_refs_); | |
| 1401 DCHECK(!open_entries_.size()); | |
| 1402 DelayedCacheCleanup(path_); | |
| 1403 } else { | |
| 1404 DeleteCache(path_, false); | |
| 1405 } | |
| 1406 | |
| 1407 // Don't call Init() if directed by the unit test: we are simulating a failure | |
| 1408 // trying to re-enable the cache. | |
| 1409 if (unit_test_) | |
| 1410 init_ = true; // Let the destructor do proper cleanup. | |
| 1411 else if (SyncInit() == net::OK) { | |
| 1412 stats_.SetCounter(Stats::FATAL_ERROR, errors); | |
| 1413 stats_.SetCounter(Stats::DOOM_CACHE, full_dooms); | |
| 1414 stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms); | |
| 1415 stats_.SetCounter(Stats::LAST_REPORT, last_report); | |
| 1416 } | |
| 1417 } | |
| 1418 | |
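| // Releases the index and block files so that RestartCache() can delete or | |
| // recreate the backing store. | |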
| 1419 void BackendImpl::PrepareForRestart() { | |
| 1420 // Reset the mask_ if it was not given by the user. | |
| 1421 if (!(user_flags_ & kMask)) | |
| 1422 mask_ = 0; | |
| 1423 | |
| 1424 if (!(user_flags_ & kNewEviction)) | |
| 1425 new_eviction_ = false; | |
| 1426 | |
| 1427 disabled_ = true; | |
| 1428 data_->header.crash = 0; | |
| 1429 index_->Flush(); | |
| 1430 index_ = NULL; | |
| 1431 data_ = NULL; | |
| 1432 block_files_.CloseFiles(); | |
| 1433 rankings_.Reset(); | |
| 1434 init_ = false; | |
| 1435 restarted_ = true; | |
| 1436 } | |
| 1437 | |
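| // Returns the already open entry for this address if there is one; otherwise | |
| // loads the entry and its rankings node from disk, sanity-checking both. | |
| // Returns 0 on success or a disk_cache error code. | |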
| 1438 int BackendImpl::NewEntry(Addr address, EntryImpl** entry) { | |
| 1439 EntriesMap::iterator it = open_entries_.find(address.value()); | |
| 1440 if (it != open_entries_.end()) { | |
| 1441 // Easy job. This entry is already in memory. | |
| 1442 EntryImpl* this_entry = it->second; | |
| 1443 this_entry->AddRef(); | |
| 1444 *entry = this_entry; | |
| 1445 return 0; | |
| 1446 } | |
| 1447 | |
| 1448 STRESS_DCHECK(block_files_.IsValid(address)); | |
| 1449 | |
| 1450 if (!address.SanityCheckForEntryV2()) { | |
| 1451 LOG(WARNING) << "Wrong entry address."; | |
| 1452 STRESS_NOTREACHED(); | |
| 1453 return ERR_INVALID_ADDRESS; | |
| 1454 } | |
| 1455 | |
| 1456 scoped_refptr<EntryImpl> cache_entry( | |
| 1457 new EntryImpl(this, address, read_only_)); | |
| 1458 IncreaseNumRefs(); | |
| 1459 *entry = NULL; | |
| 1460 | |
| 1461 TimeTicks start = TimeTicks::Now(); | |
| 1462 if (!cache_entry->entry()->Load()) | |
| 1463 return ERR_READ_FAILURE; | |
| 1464 | |
| 1465 if (IsLoaded()) { | |
| 1466 CACHE_UMA(AGE_MS, "LoadTime", 0, start); | |
| 1467 } | |
| 1468 | |
| 1469 if (!cache_entry->SanityCheck()) { | |
| 1470 LOG(WARNING) << "Messed up entry found."; | |
| 1471 STRESS_NOTREACHED(); | |
| 1472 return ERR_INVALID_ENTRY; | |
| 1473 } | |
| 1474 | |
| 1475 STRESS_DCHECK(block_files_.IsValid( | |
| 1476 Addr(cache_entry->entry()->Data()->rankings_node))); | |
| 1477 | |
| 1478 if (!cache_entry->LoadNodeAddress()) | |
| 1479 return ERR_READ_FAILURE; | |
| 1480 | |
| 1481 if (!rankings_.SanityCheck(cache_entry->rankings(), false)) { | |
| 1482 STRESS_NOTREACHED(); | |
| 1483 cache_entry->SetDirtyFlag(0); | |
| 1484 // Don't remove this from the list (it is not linked properly). Instead, | |
| 1485 // break the link back to the entry because it is going away, and leave the | |
| 1486 // rankings node to be deleted if we find it through a list. | |
| 1487 rankings_.SetContents(cache_entry->rankings(), 0); | |
| 1488 } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) { | |
| 1489 STRESS_NOTREACHED(); | |
| 1490 cache_entry->SetDirtyFlag(0); | |
| 1491 rankings_.SetContents(cache_entry->rankings(), address.value()); | |
| 1492 } | |
| 1493 | |
| 1494 if (!cache_entry->DataSanityCheck()) { | |
| 1495 LOG(WARNING) << "Messed up entry found."; | |
| 1496 cache_entry->SetDirtyFlag(0); | |
| 1497 cache_entry->FixForDelete(); | |
| 1498 } | |
| 1499 | |
| 1500 // Prevent overwriting the dirty flag in the destructor. | |
| 1501 cache_entry->SetDirtyFlag(GetCurrentEntryId()); | |
| 1502 | |
| 1503 if (cache_entry->dirty()) { | |
| 1504 Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()), | |
| 1505 address.value()); | |
| 1506 } | |
| 1507 | |
| 1508 open_entries_[address.value()] = cache_entry.get(); | |
| 1509 | |
| 1510 cache_entry->BeginLogging(net_log_, false); | |
| 1511 cache_entry.swap(entry); | |
| 1512 return 0; | |
| 1513 } | |
| 1514 | |
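| // Walks the hash chain that starts at the index slot for this hash, looking | |
| // for the given key. With find_parent set, it returns the entry preceding | |
| // the match (or the end of the chain) instead of the match itself. Dirty or | |
| // unreadable entries found along the way are unlinked from the chain. | |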
| 1515 EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash, | |
| 1516 bool find_parent, Addr entry_addr, | |
| 1517 bool* match_error) { | |
| 1518 Addr address(data_->table[hash & mask_]); | |
| 1519 scoped_refptr<EntryImpl> cache_entry, parent_entry; | |
| 1520 EntryImpl* tmp = NULL; | |
| 1521 bool found = false; | |
| 1522 std::set<CacheAddr> visited; | |
| 1523 *match_error = false; | |
| 1524 | |
| 1525 for (;;) { | |
| 1526 if (disabled_) | |
| 1527 break; | |
| 1528 | |
| 1529 if (visited.find(address.value()) != visited.end()) { | |
| 1530 // It's possible for a buggy version of the code to write a loop. Just | |
| 1531 // break it. | |
| 1532 Trace("Hash collision loop 0x%x", address.value()); | |
| 1533 address.set_value(0); | |
| 1534 parent_entry->SetNextAddress(address); | |
| 1535 } | |
| 1536 visited.insert(address.value()); | |
| 1537 | |
| 1538 if (!address.is_initialized()) { | |
| 1539 if (find_parent) | |
| 1540 found = true; | |
| 1541 break; | |
| 1542 } | |
| 1543 | |
| 1544 int error = NewEntry(address, &tmp); | |
| 1545 cache_entry.swap(&tmp); | |
| 1546 | |
| 1547 if (error || cache_entry->dirty()) { | |
| 1548 // This entry is dirty on disk (it was not properly closed): we cannot | |
| 1549 // trust it. | |
| 1550 Addr child(0); | |
| 1551 if (!error) | |
| 1552 child.set_value(cache_entry->GetNextAddress()); | |
| 1553 | |
| 1554 if (parent_entry.get()) { | |
| 1555 parent_entry->SetNextAddress(child); | |
| 1556 parent_entry = NULL; | |
| 1557 } else { | |
| 1558 data_->table[hash & mask_] = child.value(); | |
| 1559 } | |
| 1560 | |
| 1561 Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(), | |
| 1562 address.value()); | |
| 1563 | |
| 1564 if (!error) { | |
| 1565 // It is important to call DestroyInvalidEntry after removing this | |
| 1566 // entry from the table. | |
| 1567 DestroyInvalidEntry(cache_entry.get()); | |
| 1568 cache_entry = NULL; | |
| 1569 } else { | |
| 1570 Trace("NewEntry failed on MatchEntry 0x%x", address.value()); | |
| 1571 } | |
| 1572 | |
| 1573 // Restart the search. | |
| 1574 address.set_value(data_->table[hash & mask_]); | |
| 1575 visited.clear(); | |
| 1576 continue; | |
| 1577 } | |
| 1578 | |
| 1579 DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_); | |
| 1580 if (cache_entry->IsSameEntry(key, hash)) { | |
| 1581 if (!cache_entry->Update()) | |
| 1582 cache_entry = NULL; | |
| 1583 found = true; | |
| 1584 if (find_parent && entry_addr.value() != address.value()) { | |
| 1585 Trace("Entry not on the index 0x%x", address.value()); | |
| 1586 *match_error = true; | |
| 1587 parent_entry = NULL; | |
| 1588 } | |
| 1589 break; | |
| 1590 } | |
| 1591 if (!cache_entry->Update()) | |
| 1592 cache_entry = NULL; | |
| 1593 parent_entry = cache_entry; | |
| 1594 cache_entry = NULL; | |
| 1595 if (!parent_entry.get()) | |
| 1596 break; | |
| 1597 | |
| 1598 address.set_value(parent_entry->GetNextAddress()); | |
| 1599 } | |
| 1600 | |
| 1601 if (parent_entry.get() && (!find_parent || !found)) | |
| 1602 parent_entry = NULL; | |
| 1603 | |
| 1604 if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) { | |
| 1605 *match_error = true; | |
| 1606 parent_entry = NULL; | |
| 1607 } | |
| 1608 | |
| 1609 if (cache_entry.get() && (find_parent || !found)) | |
| 1610 cache_entry = NULL; | |
| 1611 | |
| 1612 find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp); | |
| 1613 FlushIndex(); | |
| 1614 return tmp; | |
| 1615 } | |
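The `visited` set in MatchEntry above guards against a corrupted index chain that points back at an address it already contains: once an address repeats, the loop zeroes the offending next pointer and restarts the search. A standalone sketch of that walk-with-visited-set repair, using hypothetical `Node` and `FindInChain` names rather than the disk_cache types:

    #include <cstdint>
    #include <set>
    #include <unordered_map>

    // Hypothetical, simplified model of one index bucket: entries chained
    // through "next" addresses, where 0 marks the end of the chain. These are
    // not the disk_cache types; the names are made up for illustration only.
    struct Node {
      uint32_t key;
      uint32_t next;
    };

    // Walks the chain rooted at |head| looking for |key|, breaking an
    // accidental cycle the same way MatchEntry does: once an address repeats,
    // cut the link and give up on the search.
    uint32_t FindInChain(std::unordered_map<uint32_t, Node>& nodes,
                         uint32_t head, uint32_t key) {
      std::set<uint32_t> visited;
      uint32_t prev = 0;
      uint32_t addr = head;
      while (addr != 0) {
        if (!visited.insert(addr).second) {
          if (prev != 0)
            nodes[prev].next = 0;  // Break the loop a buggy writer produced.
          return 0;
        }
        auto it = nodes.find(addr);
        if (it == nodes.end())
          return 0;  // Dangling address; treat as end of chain.
        if (it->second.key == key)
          return addr;
        prev = addr;
        addr = it->second.next;
      }
      return 0;  // Not found.
    }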
| 1616 | |
| 1617 // This is the actual implementation for OpenNextEntry and OpenPrevEntry. | |
| 1618 EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) { | |
| 1619 if (disabled_) | |
| 1620 return NULL; | |
| 1621 | |
| 1622 DCHECK(iter); | |
| 1623 | |
| 1624 const int kListsToSearch = 3; | |
| 1625 scoped_refptr<EntryImpl> entries[kListsToSearch]; | |
| 1626 scoped_ptr<Rankings::Iterator> iterator( | |
| 1627 reinterpret_cast<Rankings::Iterator*>(*iter)); | |
| 1628 *iter = NULL; | |
| 1629 | |
| 1630 if (!iterator.get()) { | |
| 1631 iterator.reset(new Rankings::Iterator(&rankings_)); | |
| 1632 bool ret = false; | |
| 1633 | |
| 1634 // Get an entry from each list. | |
| 1635 for (int i = 0; i < kListsToSearch; i++) { | |
| 1636 EntryImpl* temp = NULL; | |
| 1637 ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i), | |
| 1638 &iterator->nodes[i], &temp); | |
| 1639 entries[i].swap(&temp); // The entry was already addref'd. | |
| 1640 } | |
| 1641 if (!ret) | |
| 1642 return NULL; | |
| 1643 } else { | |
| 1644 // Get the next entry from the last list, and the actual entries for the | |
| 1645 // elements on the other lists. | |
| 1646 for (int i = 0; i < kListsToSearch; i++) { | |
| 1647 EntryImpl* temp = NULL; | |
| 1648 if (iterator->list == i) { | |
| 1649 OpenFollowingEntryFromList(forward, iterator->list, | |
| 1650 &iterator->nodes[i], &temp); | |
| 1651 } else { | |
| 1652 temp = GetEnumeratedEntry(iterator->nodes[i], | |
| 1653 static_cast<Rankings::List>(i)); | |
| 1654 } | |
| 1655 | |
| 1656 entries[i].swap(&temp); // The entry was already addref'd. | |
| 1657 } | |
| 1658 } | |
| 1659 | |
| 1660 int newest = -1; | |
| 1661 int oldest = -1; | |
| 1662 Time access_times[kListsToSearch]; | |
| 1663 for (int i = 0; i < kListsToSearch; i++) { | |
| 1664 if (entries[i].get()) { | |
| 1665 access_times[i] = entries[i]->GetLastUsed(); | |
| 1666 if (newest < 0) { | |
| 1667 DCHECK_LT(oldest, 0); | |
| 1668 newest = oldest = i; | |
| 1669 continue; | |
| 1670 } | |
| 1671 if (access_times[i] > access_times[newest]) | |
| 1672 newest = i; | |
| 1673 if (access_times[i] < access_times[oldest]) | |
| 1674 oldest = i; | |
| 1675 } | |
| 1676 } | |
| 1677 | |
| 1678 if (newest < 0 || oldest < 0) | |
| 1679 return NULL; | |
| 1680 | |
| 1681 EntryImpl* next_entry; | |
| 1682 if (forward) { | |
| 1683 next_entry = entries[newest].get(); | |
| 1684 iterator->list = static_cast<Rankings::List>(newest); | |
| 1685 } else { | |
| 1686 next_entry = entries[oldest].get(); | |
| 1687 iterator->list = static_cast<Rankings::List>(oldest); | |
| 1688 } | |
| 1689 | |
| 1690 *iter = iterator.release(); | |
| 1691 next_entry->AddRef(); | |
| 1692 return next_entry; | |
| 1693 } | |
| 1694 | |
| 1695 bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list, | |
| 1696 CacheRankingsBlock** from_entry, | |
| 1697 EntryImpl** next_entry) { | |
| 1698 if (disabled_) | |
| 1699 return false; | |
| 1700 | |
| 1701 if (!new_eviction_ && Rankings::NO_USE != list) | |
| 1702 return false; | |
| 1703 | |
| 1704 Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry); | |
| 1705 CacheRankingsBlock* next_block = forward ? | |
| 1706 rankings_.GetNext(rankings.get(), list) : | |
| 1707 rankings_.GetPrev(rankings.get(), list); | |
| 1708 Rankings::ScopedRankingsBlock next(&rankings_, next_block); | |
| 1709 *from_entry = NULL; | |
| 1710 | |
| 1711 *next_entry = GetEnumeratedEntry(next.get(), list); | |
| 1712 if (!*next_entry) | |
| 1713 return false; | |
| 1714 | |
| 1715 *from_entry = next.release(); | |
| 1716 return true; | |
| 1717 } | |
| 1718 | |
| 1719 EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next, | |
| 1720 Rankings::List list) { | |
| 1721 if (!next || disabled_) | |
| 1722 return NULL; | |
| 1723 | |
| 1724 EntryImpl* entry; | |
| 1725 int rv = NewEntry(Addr(next->Data()->contents), &entry); | |
| 1726 if (rv) { | |
| 1727 STRESS_NOTREACHED(); | |
| 1728 rankings_.Remove(next, list, false); | |
| 1729 if (rv == ERR_INVALID_ADDRESS) { | |
| 1730 // There is nothing linked from the index. Delete the rankings node. | |
| 1731 DeleteBlock(next->address(), true); | |
| 1732 } | |
| 1733 return NULL; | |
| 1734 } | |
| 1735 | |
| 1736 if (entry->dirty()) { | |
| 1737 // We cannot trust this entry. | |
| 1738 InternalDoomEntry(entry); | |
| 1739 entry->Release(); | |
| 1740 return NULL; | |
| 1741 } | |
| 1742 | |
| 1743 if (!entry->Update()) { | |
| 1744 STRESS_NOTREACHED(); | |
| 1745 entry->Release(); | |
| 1746 return NULL; | |
| 1747 } | |
| 1748 | |
| 1749 // Note that it is unfortunate (but possible) for this entry to be clean, but | |
| 1750 // not actually the real entry. In other words, we could have lost this entry | |
| 1751 // from the index, and it could have been replaced with a newer one. It's not | |
| 1752 // worth checking that this entry is "the real one", so we just return it and | |
| 1753 // let the enumeration continue; this entry will be evicted at some point, and | |
| 1754 // the regular path will work with the real entry. With time, this problem | |
| 1755 // will disappear because this scenario is just a bug. | |
| 1756 | |
| 1757 // Make sure that we save the key for later. | |
| 1758 entry->GetKey(); | |
| 1759 | |
| 1760 return entry; | |
| 1761 } | |
| 1762 | |
| 1763 EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) { | |
| 1764 if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) { | |
| 1765 deleted_entry->Release(); | |
| 1766 stats_.OnEvent(Stats::CREATE_MISS); | |
| 1767 Trace("create entry miss "); | |
| 1768 return NULL; | |
| 1769 } | |
| 1770 | |
| 1771 // We are attempting to create an entry and found out that the entry was | |
| 1772 // previously deleted. | |
| 1773 | |
| 1774 eviction_.OnCreateEntry(deleted_entry); | |
| 1775 entry_count_++; | |
| 1776 | |
| 1777 stats_.OnEvent(Stats::RESURRECT_HIT); | |
| 1778 Trace("Resurrect entry hit "); | |
| 1779 return deleted_entry; | |
| 1780 } | |
| 1781 | |
| 1782 void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) { | |
| 1783 LOG(WARNING) << "Destroying invalid entry."; | |
| 1784 Trace("Destroying invalid entry 0x%p", entry); | |
| 1785 | |
| 1786 entry->SetPointerForInvalidEntry(GetCurrentEntryId()); | |
| 1787 | |
| 1788 eviction_.OnDoomEntry(entry); | |
| 1789 entry->InternalDoom(); | |
| 1790 | |
| 1791 if (!new_eviction_) | |
| 1792 DecreaseNumEntries(); | |
| 1793 stats_.OnEvent(Stats::INVALID_ENTRY); | |
| 1794 } | |
| 1795 | |
| 1796 void BackendImpl::AddStorageSize(int32 bytes) { | |
| 1797 data_->header.num_bytes += bytes; | |
| 1798 DCHECK_GE(data_->header.num_bytes, 0); | |
| 1799 } | |
| 1800 | |
| 1801 void BackendImpl::SubstractStorageSize(int32 bytes) { | |
| 1802 data_->header.num_bytes -= bytes; | |
| 1803 DCHECK_GE(data_->header.num_bytes, 0); | |
| 1804 } | |
| 1805 | |
| 1806 void BackendImpl::IncreaseNumRefs() { | |
| 1807 num_refs_++; | |
| 1808 if (max_refs_ < num_refs_) | |
| 1809 max_refs_ = num_refs_; | |
| 1810 } | |
| 1811 | |
| 1812 void BackendImpl::DecreaseNumRefs() { | |
| 1813 DCHECK(num_refs_); | |
| 1814 num_refs_--; | |
| 1815 | |
| 1816 if (!num_refs_ && disabled_) | |
| 1817 base::MessageLoop::current()->PostTask( | |
| 1818 FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true)); | |
| 1819 } | |
| 1820 | |
| 1821 void BackendImpl::IncreaseNumEntries() { | |
| 1822 data_->header.num_entries++; | |
| 1823 DCHECK_GT(data_->header.num_entries, 0); | |
| 1824 } | |
| 1825 | |
| 1826 void BackendImpl::DecreaseNumEntries() { | |
| 1827 data_->header.num_entries--; | |
| 1828 if (data_->header.num_entries < 0) { | |
| 1829 NOTREACHED(); | |
| 1830 data_->header.num_entries = 0; | |
| 1831 } | |
| 1832 } | |
| 1833 | |
| 1834 void BackendImpl::LogStats() { | |
| 1835 StatsItems stats; | |
| 1836 GetStats(&stats); | |
| 1837 | |
| 1838 for (size_t index = 0; index < stats.size(); index++) | |
| 1839 VLOG(1) << stats[index].first << ": " << stats[index].second; | |
| 1840 } | |
| 1841 | |
| 1842 void BackendImpl::ReportStats() { | |
| 1843 CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries); | |
| 1844 | |
| 1845 int current_size = data_->header.num_bytes / (1024 * 1024); | |
| 1846 int max_size = max_size_ / (1024 * 1024); | |
| 1847 int hit_ratio_as_percentage = stats_.GetHitRatio(); | |
| 1848 | |
| 1849 CACHE_UMA(COUNTS_10000, "Size2", 0, current_size); | |
| 1850 // For any bin in HitRatioBySize2, the hit ratio of caches of that size is the | |
| 1851 // ratio of that bin's total count to the count in the same bin in the Size2 | |
| 1852 // histogram. | |
| 1853 if (base::RandInt(0, 99) < hit_ratio_as_percentage) | |
| 1854 CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size); | |
| 1855 CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size); | |
| 1856 if (!max_size) | |
| 1857 max_size++; | |
| 1858 CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size); | |
| 1859 | |
| 1860 CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0, | |
| 1861 static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES))); | |
| 1862 CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0, | |
| 1863 static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES))); | |
| 1864 stats_.SetCounter(Stats::MAX_ENTRIES, 0); | |
| 1865 | |
| 1866 CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0, | |
| 1867 static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR))); | |
| 1868 CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0, | |
| 1869 static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE))); | |
| 1870 CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0, | |
| 1871 static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT))); | |
| 1872 stats_.SetCounter(Stats::FATAL_ERROR, 0); | |
| 1873 stats_.SetCounter(Stats::DOOM_CACHE, 0); | |
| 1874 stats_.SetCounter(Stats::DOOM_RECENT, 0); | |
| 1875 | |
| 1876 int age = (Time::Now() - | |
| 1877 Time::FromInternalValue(data_->header.create_time)).InHours(); | |
| 1878 if (age) | |
| 1879 CACHE_UMA(HOURS, "FilesAge", 0, age); | |
| 1880 | |
| 1881 int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120; | |
| 1882 if (!data_->header.create_time || !data_->header.lru.filled) { | |
| 1883 int cause = data_->header.create_time ? 0 : 1; | |
| 1884 if (!data_->header.lru.filled) | |
| 1885 cause |= 2; | |
| 1886 CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause); | |
| 1887 CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours)); | |
| 1888 return; | |
| 1889 } | |
| 1890 | |
| 1891 // This is an up-to-date client that will report FirstEviction() data. After | |
| 1892 // that event, start reporting this: | |
| 1893 | |
| 1894 CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours)); | |
| 1895 // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total | |
| 1896 // time is the ratio of that bin's total count to the count in the same bin in | |
| 1897 // the TotalTime histogram. | |
| 1898 if (base::RandInt(0, 99) < hit_ratio_as_percentage) | |
| 1899 CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours)); | |
| 1900 | |
| 1901 int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120; | |
| 1902 stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER)); | |
| 1903 | |
| 1904 // We may see users with no use_hours at this point if this is the first time | |
| 1905 // we are running this code. | |
| 1906 if (use_hours) | |
| 1907 use_hours = total_hours - use_hours; | |
| 1908 | |
| 1909 if (!use_hours || !GetEntryCount() || !data_->header.num_bytes) | |
| 1910 return; | |
| 1911 | |
| 1912 CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours)); | |
| 1913 // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time | |
| 1914 // is the ratio of that bin's total count to the count in the same bin in the | |
| 1915 // UseTime histogram. | |
| 1916 if (base::RandInt(0, 99) < hit_ratio_as_percentage) | |
| 1917 CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours)); | |
| 1918 CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage); | |
| 1919 | |
| 1920 int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours; | |
| 1921 CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate)); | |
| 1922 | |
| 1923 int avg_size = data_->header.num_bytes / GetEntryCount(); | |
| 1924 CACHE_UMA(COUNTS, "EntrySize", 0, avg_size); | |
| 1925 CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries); | |
| 1926 | |
| 1927 CACHE_UMA(PERCENTAGE, "IndexLoad", 0, | |
| 1928 data_->header.num_entries * 100 / (mask_ + 1)); | |
| 1929 | |
| 1930 int large_entries_bytes = stats_.GetLargeEntriesSize(); | |
| 1931 int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes; | |
| 1932 CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio); | |
| 1933 | |
| 1934 if (new_eviction_) { | |
| 1935 CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio()); | |
| 1936 CACHE_UMA(PERCENTAGE, "NoUseRatio", 0, | |
| 1937 data_->header.lru.sizes[0] * 100 / data_->header.num_entries); | |
| 1938 CACHE_UMA(PERCENTAGE, "LowUseRatio", 0, | |
| 1939 data_->header.lru.sizes[1] * 100 / data_->header.num_entries); | |
| 1940 CACHE_UMA(PERCENTAGE, "HighUseRatio", 0, | |
| 1941 data_->header.lru.sizes[2] * 100 / data_->header.num_entries); | |
| 1942 CACHE_UMA(PERCENTAGE, "DeletedRatio", 0, | |
| 1943 data_->header.lru.sizes[4] * 100 / data_->header.num_entries); | |
| 1944 } | |
| 1945 | |
| 1946 stats_.ResetRatios(); | |
| 1947 stats_.SetCounter(Stats::TRIM_ENTRY, 0); | |
| 1948 | |
| 1949 if (cache_type_ == net::DISK_CACHE) | |
| 1950 block_files_.ReportStats(); | |
| 1951 } | |
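The HitRatioBySize2 / HitRatioByTotalTime / HitRatioByUseTime comments in ReportStats() above all rely on the same sampling identity: every report increments the base histogram (Size2, TotalTime, UseTime), while the matching HitRatioBy* histogram is incremented only with probability hit_ratio_as_percentage / 100, so the expected per-bin count ratio between the two histograms equals the hit ratio for that bin. A minimal sketch of that identity with made-up numbers, where plain counters stand in for the CACHE_UMA macros:

    #include <cstdlib>
    #include <iostream>

    // Simulates many ReportStats() calls from caches whose true hit ratio is
    // 40% and whose size always lands in one fixed bin. The ratio of the two
    // counters converges to ~0.40, which is how HitRatioBySize2 is read.
    int main() {
      const int kHitRatioPercent = 40;   // Assumed value for the example.
      int size2_count = 0;               // Stands in for the "Size2" bin.
      int hit_ratio_by_size_count = 0;   // Stands in for "HitRatioBySize2".

      for (int report = 0; report < 100000; ++report) {
        ++size2_count;                              // Always reported.
        if (std::rand() % 100 < kHitRatioPercent)   // Reported with p = ratio.
          ++hit_ratio_by_size_count;
      }

      std::cout << "recovered hit ratio ~= "
                << 100.0 * hit_ratio_by_size_count / size2_count << "%\n";
      return 0;
    }

With 100,000 simulated reports the printed ratio lands close to 40%, which is exactly the per-bin division the comments describe.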
| 1952 | |
| 1953 void BackendImpl::UpgradeTo2_1() { | |
| 1954 // 2.1 is basically the same as 2.0, except that new fields are actually | |
| 1955 // updated by the new eviction algorithm. | |
| 1956 DCHECK(0x20000 == data_->header.version); | |
| 1957 data_->header.version = 0x20001; | |
| 1958 data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries; | |
| 1959 } | |
| 1960 | |
| 1961 bool BackendImpl::CheckIndex() { | |
| 1962 DCHECK(data_); | |
| 1963 | |
| 1964 size_t current_size = index_->GetLength(); | |
| 1965 if (current_size < sizeof(Index)) { | |
| 1966 LOG(ERROR) << "Corrupt Index file"; | |
| 1967 return false; | |
| 1968 } | |
| 1969 | |
| 1970 if (new_eviction_) { | |
| 1971 // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1. | |
| 1972 if (kIndexMagic != data_->header.magic || | |
| 1973 kCurrentVersion >> 16 != data_->header.version >> 16) { | |
| 1974 LOG(ERROR) << "Invalid file version or magic"; | |
| 1975 return false; | |
| 1976 } | |
| 1977 if (kCurrentVersion == data_->header.version) { | |
| 1978 // We need file version 2.1 for the new eviction algorithm. | |
| 1979 UpgradeTo2_1(); | |
| 1980 } | |
| 1981 } else { | |
| 1982 if (kIndexMagic != data_->header.magic || | |
| 1983 kCurrentVersion != data_->header.version) { | |
| 1984 LOG(ERROR) << "Invalid file version or magic"; | |
| 1985 return false; | |
| 1986 } | |
| 1987 } | |
| 1988 | |
| 1989 if (!data_->header.table_len) { | |
| 1990 LOG(ERROR) << "Invalid table size"; | |
| 1991 return false; | |
| 1992 } | |
| 1993 | |
| 1994 if (current_size < GetIndexSize(data_->header.table_len) || | |
| 1995 data_->header.table_len & (kBaseTableLen - 1)) { | |
| 1996 LOG(ERROR) << "Corrupt Index file"; | |
| 1997 return false; | |
| 1998 } | |
| 1999 | |
| 2000 AdjustMaxCacheSize(data_->header.table_len); | |
| 2001 | |
| 2002 #if !defined(NET_BUILD_STRESS_CACHE) | |
| 2003 if (data_->header.num_bytes < 0 || | |
| 2004 (max_size_ < kint32max - kDefaultCacheSize && | |
| 2005 data_->header.num_bytes > max_size_ + kDefaultCacheSize)) { | |
| 2006 LOG(ERROR) << "Invalid cache (current) size"; | |
| 2007 return false; | |
| 2008 } | |
| 2009 #endif | |
| 2010 | |
| 2011 if (data_->header.num_entries < 0) { | |
| 2012 LOG(ERROR) << "Invalid number of entries"; | |
| 2013 return false; | |
| 2014 } | |
| 2015 | |
| 2016 if (!mask_) | |
| 2017 mask_ = data_->header.table_len - 1; | |
| 2018 | |
| 2019 // Load the table into memory with a single read. | |
| 2020 scoped_ptr<char[]> buf(new char[current_size]); | |
| 2021 return index_->Read(buf.get(), current_size, 0); | |
| 2022 } | |
| 2023 | |
| 2024 int BackendImpl::CheckAllEntries() { | |
| 2025 int num_dirty = 0; | |
| 2026 int num_entries = 0; | |
| 2027 DCHECK(mask_ < kuint32max); | |
| 2028 for (unsigned int i = 0; i <= mask_; i++) { | |
| 2029 Addr address(data_->table[i]); | |
| 2030 if (!address.is_initialized()) | |
| 2031 continue; | |
| 2032 for (;;) { | |
| 2033 EntryImpl* tmp; | |
| 2034 int ret = NewEntry(address, &tmp); | |
| 2035 if (ret) { | |
| 2036 STRESS_NOTREACHED(); | |
| 2037 return ret; | |
| 2038 } | |
| 2039 scoped_refptr<EntryImpl> cache_entry; | |
| 2040 cache_entry.swap(&tmp); | |
| 2041 | |
| 2042 if (cache_entry->dirty()) | |
| 2043 num_dirty++; | |
| 2044 else if (CheckEntry(cache_entry.get())) | |
| 2045 num_entries++; | |
| 2046 else | |
| 2047 return ERR_INVALID_ENTRY; | |
| 2048 | |
| 2049 DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_); | |
| 2050 address.set_value(cache_entry->GetNextAddress()); | |
| 2051 if (!address.is_initialized()) | |
| 2052 break; | |
| 2053 } | |
| 2054 } | |
| 2055 | |
| 2056 Trace("CheckAllEntries End"); | |
| 2057 if (num_entries + num_dirty != data_->header.num_entries) { | |
| 2058 LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty << | |
| 2059 " " << data_->header.num_entries; | |
| 2060 DCHECK_LT(num_entries, data_->header.num_entries); | |
| 2061 return ERR_NUM_ENTRIES_MISMATCH; | |
| 2062 } | |
| 2063 | |
| 2064 return num_dirty; | |
| 2065 } | |
| 2066 | |
| 2067 bool BackendImpl::CheckEntry(EntryImpl* cache_entry) { | |
| 2068 bool ok = block_files_.IsValid(cache_entry->entry()->address()); | |
| 2069 ok = ok && block_files_.IsValid(cache_entry->rankings()->address()); | |
| 2070 EntryStore* data = cache_entry->entry()->Data(); | |
| 2071 for (size_t i = 0; i < arraysize(data->data_addr); i++) { | |
| 2072 if (data->data_addr[i]) { | |
| 2073 Addr address(data->data_addr[i]); | |
| 2074 if (address.is_block_file()) | |
| 2075 ok = ok && block_files_.IsValid(address); | |
| 2076 } | |
| 2077 } | |
| 2078 | |
| 2079 return ok && cache_entry->rankings()->VerifyHash(); | |
| 2080 } | |
| 2081 | |
| 2082 int BackendImpl::MaxBuffersSize() { | |
| 2083 static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory(); | |
| 2084 static bool done = false; | |
| 2085 | |
| 2086 if (!done) { | |
| 2087 const int kMaxBuffersSize = 30 * 1024 * 1024; | |
| 2088 | |
| 2089 // We want to use up to 2% of the computer's memory. | |
| 2090 total_memory = total_memory * 2 / 100; | |
| 2091 if (total_memory > kMaxBuffersSize || total_memory <= 0) | |
| 2092 total_memory = kMaxBuffersSize; | |
| 2093 | |
| 2094 done = true; | |
| 2095 } | |
| 2096 | |
| 2097 return static_cast<int>(total_memory); | |
| 2098 } | |
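MaxBuffersSize() budgets in-memory buffers at 2% of physical RAM, clamped to 30 MB, and falls back to the cap whenever the memory probe returns something unusable. A small worked example of that arithmetic, assuming a standalone BufferBudget() helper in place of base::SysInfo:

    #include <cstdint>
    #include <iostream>

    // Mirrors the MaxBuffersSize() policy: 2% of physical memory, clamped to
    // 30 MB, with the cap used as a fallback for non-positive probe results.
    int64_t BufferBudget(int64_t total_memory_bytes) {
      const int64_t kMaxBuffersSize = 30 * 1024 * 1024;  // 30 MB cap.
      int64_t budget = total_memory_bytes * 2 / 100;
      if (budget > kMaxBuffersSize || budget <= 0)
        budget = kMaxBuffersSize;
      return budget;
    }

    int main() {
      // 1 GB of RAM -> ~21 MB of buffers; 8 GB -> clamped to the 30 MB cap.
      std::cout << BufferBudget(1LL * 1024 * 1024 * 1024) << "\n";  // 21474836
      std::cout << BufferBudget(8LL * 1024 * 1024 * 1024) << "\n";  // 31457280
      return 0;
    }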
| 2099 | |
| 2100 } // namespace disk_cache | |