OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "net/disk_cache/v3/backend_impl_v3.h" |
| 6 |
| 7 #include "base/bind.h" |
| 8 #include "base/bind_helpers.h" |
| 9 #include "base/file_util.h" |
| 10 #include "base/files/file_path.h" |
| 11 #include "base/hash.h" |
| 12 #include "base/message_loop.h" |
| 13 #include "base/metrics/field_trial.h" |
| 14 #include "base/metrics/histogram.h" |
| 15 #include "base/metrics/stats_counters.h" |
| 16 #include "base/rand_util.h" |
| 17 #include "base/string_util.h" |
| 18 #include "base/stringprintf.h" |
| 19 #include "base/sys_info.h" |
| 20 #include "base/threading/thread_restrictions.h" |
| 21 #include "base/threading/worker_pool.h" |
| 22 #include "base/time.h" |
| 23 #include "base/timer.h" |
| 24 #include "net/base/net_errors.h" |
| 25 #include "net/base/io_buffer.h" |
| 26 #include "net/disk_cache/errors.h" |
| 27 #include "net/disk_cache/experiments.h" |
| 28 #include "net/disk_cache/file.h" |
| 29 #include "net/disk_cache/storage_block-inl.h" |
| 30 #include "net/disk_cache/v3/backend_worker.h" |
| 31 #include "net/disk_cache/v3/backend_work_item.h" |
| 32 #include "net/disk_cache/v3/disk_format_v3.h" |
| 33 #include "net/disk_cache/v3/entry_impl_v3.h" |
| 34 #include "net/disk_cache/v3/index_table.h" |
| 35 |
| 36 // This has to be defined before including histogram_macros.h from this file. |
| 37 #define NET_DISK_CACHE_BACKEND_IMPL_CC_ |
| 38 #include "net/disk_cache/v3/histogram_macros.h" |
| 39 |
| 40 using base::Time; |
| 41 using base::TimeDelta; |
| 42 using base::TimeTicks; |
| 43 |
| 44 namespace { |
| 45 |
| 46 // Seems like ~240 MB correspond to less than 50k entries for 99% of the people. |
| 47 // Note that the actual target is to keep the index table load factor under 55% |
| 48 // for most users. |
| 49 const int k64kEntriesStore = 240 * 1000 * 1000; |
| 50 const int kBaseTableLen = 64 * 1024; |
| 51 const int kDefaultCacheSize = 80 * 1024 * 1024; |
| 52 |
| 53 // Avoid trimming the cache for the first 5 minutes (10 timer ticks). |
| 54 const int kTrimDelay = 10; |
| 55 const int kTimerSeconds = 30; |
| 56 |
| 57 const size_t kMaxKeySize = 64 * 1024; |
| 58 |
| 59 int DesiredIndexTableLen(int32 storage_size) { |
| 60 if (storage_size <= k64kEntriesStore) |
| 61 return kBaseTableLen; |
| 62 if (storage_size <= k64kEntriesStore * 2) |
| 63 return kBaseTableLen * 2; |
| 64 if (storage_size <= k64kEntriesStore * 4) |
| 65 return kBaseTableLen * 4; |
| 66 if (storage_size <= k64kEntriesStore * 8) |
| 67 return kBaseTableLen * 8; |
| 68 |
| 69 // The biggest storage_size for int32 requires a 4 MB table. |
| 70 return kBaseTableLen * 16; |
| 71 } |
| 72 |
// Inverse of DesiredIndexTableLen(): the largest storage size that a table of
// |table_len| entries is expected to serve.
// NOTE(review): for the largest tables (16 * kBaseTableLen) this product
// exceeds int range — presumably callers never reach that value; confirm.
int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}
| 76 |
// Returns the on-disk size of the index bitmap for a table of |table_len|
// entries: the bitmap embedded in the header when it fits, otherwise the
// header plus as many whole 4 KB pages as the extra bits require.
size_t GetIndexBitmapSize(int table_len) {
  DCHECK_LT(table_len, 1 << 22);
  size_t base_bits = disk_cache::kBaseBitmapBytes * 8;
  if (table_len < static_cast<int>(base_bits))
    return sizeof(disk_cache::IndexBitmap);

  // Bytes needed beyond the base bitmap, rounded up to whole 4 KB pages.
  size_t num_pages = (table_len / 8) - disk_cache::kBaseBitmapBytes;
  num_pages = (num_pages + 4095) / 4096;
  return sizeof(disk_cache::IndexHeaderV3) + num_pages * 4096;
}
| 87 |
| 88 } // namespace |
| 89 |
| 90 // ------------------------------------------------------------------------ |
| 91 |
| 92 namespace disk_cache { |
| 93 |
| 94 // Exported by disk_cache/backend_impl.cc |
| 95 // Returns the preferred max cache size given the available disk space. |
| 96 NET_EXPORT_PRIVATE int PreferedCacheSize(int64 available); |
| 97 |
// Constructs a backend rooted at |path|; the object is not usable until
// Init() completes. |cache_thread| is where blocking work is posted.
BackendImplV3::BackendImplV3(const base::FilePath& path,
                             base::MessageLoopProxy* cache_thread,
                             net::NetLog* net_log)
    : index_(this),
      path_(path),
      block_files_(this),
      max_size_(0),
      up_ticks_(0),
      test_seconds_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      read_only_(false),
      disabled_(false),
      lru_eviction_(true),  // V2 eviction is opted into via SetNewEviction().
      first_timer_(true),
      user_load_(false),
      growing_index_(false),
      growing_files_(false),
      net_log_(net_log),
      cache_thread_(cache_thread),
      ptr_factory_(this) {
}
| 123 |
// Releases all backend resources; see CleanupCache() for the teardown work.
BackendImplV3::~BackendImplV3() {
  CleanupCache();
}
| 127 |
// Starts asynchronous initialization on the worker; |callback| is invoked
// when WORK_INIT completes. Always returns ERR_IO_PENDING on the first call.
int BackendImplV3::Init(const CompletionCallback& callback) {
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;  // Double Init() is a caller bug.

  worker_ = new Worker(path_, base::MessageLoopProxy::current());
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_INIT);
  work_item->set_user_callback(callback);
  work_item->set_flags(user_flags_);
  PostWorkItem(work_item);

  return net::ERR_IO_PENDING;
}
| 141 |
| 142 // ------------------------------------------------------------------------ |
| 143 |
// Backward enumeration step; delegates to OpenFollowingEntry() with
// forward == true meaning "previous" here.
int BackendImplV3::OpenPrevEntry(void** iter, Entry** prev_entry,
                                 const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  return OpenFollowingEntry(true, iter, prev_entry, callback);
}
| 149 |
| 150 bool BackendImplV3::SetMaxSize(int max_bytes) { |
| 151 COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); |
| 152 if (max_bytes < 0) |
| 153 return false; |
| 154 |
| 155 // Zero size means use the default. |
| 156 if (!max_bytes) |
| 157 return true; |
| 158 |
| 159 // Avoid a DCHECK later on. |
| 160 if (max_bytes >= kint32max - kint32max / 10) |
| 161 max_bytes = kint32max - kint32max / 10 - 1; |
| 162 |
| 163 user_flags_ |= MAX_SIZE; |
| 164 max_size_ = max_bytes; |
| 165 return true; |
| 166 } |
| 167 |
// Sets the cache type; the memory-only type is handled by a different
// backend and is not allowed here.
void BackendImplV3::SetType(net::CacheType type) {
  DCHECK_NE(net::MEMORY_CACHE, type);
  cache_type_ = type;
}
| 172 |
// Allocates |block_count| blocks of |block_type|; thin wrapper over the
// block files.
bool BackendImplV3::CreateBlock(FileType block_type, int block_count,
                                Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}
| 177 |
| 178 void BackendImplV3::UpdateRank(EntryImplV3* entry, bool modified) { |
| 179 if (!modified && (cache_type() == net::SHADER_CACHE || read_only_)) |
| 180 return; |
| 181 |
| 182 index_.UpdateTime(entry->GetHash(), entry->GetAddress(), GetTime()); |
| 183 } |
| 184 |
// Transitions an open entry to the doomed state: marks its index cell as
// deleted, moves it from open_entries_ to doomed_entries_, and notifies the
// entry itself.
void BackendImplV3::InternalDoomEntry(EntryImplV3* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->GetAddress();

  Trace("Doom entry 0x%p", entry);

  index_.SetSate(hash, entry_addr, ENTRY_DELETED);

  // The entry is transitioning from open to doomed.
  doomed_entries_[entry_addr.value()] = entry;
  EntriesMap::iterator it = open_entries_.find(entry_addr.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
  else
    NOTREACHED();  // A doomed entry must have been open.

  entry->InternalDoom();
  DecreaseNumEntries();
}
| 205 |
// Decides whether a doomed entry's storage may be deleted right away. The
// first request defers deletion until the next backup cycle (returns false);
// the second request for the same entry allows it (returns true).
bool BackendImplV3::ShouldDeleteNow(EntryImplV3* entry) {
  Addr entry_addr = entry->GetAddress();
  DCHECK(doomed_entries_.count(entry_addr.value()));
  EntriesMap::iterator it = entries_to_delete_.find(entry_addr.value());
  if (it == entries_to_delete_.end()) {
    // Delay deletion until the next backup cycle.
    entries_to_delete_[entry_addr.value()] = entry;
    entry->AddRef();

    // The entry was ready to be deleted. By opening it again we make sure
    // we'll go again through the normal Close() logic later on, and we'll have
    // a second chance to allow deletion.
    entry->OnOpenEntry();
    return false;
  }

  entries_to_delete_.erase(it);
  return true;
}
| 225 |
// Keeps a recently-used entry alive a bit longer so that it can be reused
// directly if reopened soon; see ReleaseRecentEntries() for the release.
void BackendImplV3::OnEntryCleanup(EntryImplV3* entry) {
  // An entry may be going away pretty soon (as soon as all pending IO is done).
  // Grab an extra reference so that the entry is alive for a little longer and
  // we may reuse it directly.
  if (recent_entries_.insert(entry).second)
    entry->AddRef();
}
| 233 |
// Called as an entry object starts destruction: drops it from the open map
// (marking the cell USED), or — if it was doomed — queues the cell for
// release on the next backup cycle.
void BackendImplV3::OnEntryDestroyBegin(Addr address) {
  if (disabled_)
    return;
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    index_.SetSate(it->second->GetHash(), address, ENTRY_USED);
    open_entries_.erase(it);
  } else {
    it = doomed_entries_.find(address.value());
    if (it != doomed_entries_.end()) {
      // All data is gone. Wait for the next backup cycle before releasing the
      // cell itself.
      CellInfo cell_info = { it->second->GetHash(), address };
      deleted_entries_.push_back(cell_info);
      doomed_entries_.erase(it);
    }
  }
}
| 252 |
// Final step of entry destruction: updates the reference count and trims the
// cache if it is over budget (after the startup grace period, unless running
// a basic unit test).
void BackendImplV3::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (disabled_)
    return;
  if (index_.header()->num_bytes > max_size_ && !read_only_ &&
      (up_ticks_ > kTrimDelay || user_flags_ & BASIC_UNIT_TEST)) {
    eviction_.TrimCache();
  }
}
| 262 |
// Marks the entry's index cell as modified so the change is backed up.
void BackendImplV3::OnEntryModified(EntryImplV3* entry) {
  index_.SetSate(entry->GetHash(), entry->GetAddress(), ENTRY_MODIFIED);
}
| 266 |
| 267 void BackendImplV3::ReadData(EntryImplV3* entry, Addr address, int offset, |
| 268 net::IOBuffer* buffer, int buffer_len, |
| 269 const CompletionCallback& callback) { |
| 270 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_READ_DATA); |
| 271 work_item->set_buffer(buffer); |
| 272 work_item->set_buffer_len(buffer_len); |
| 273 work_item->set_address(address); |
| 274 work_item->set_offset(offset); |
| 275 work_item->set_user_callback(callback); |
| 276 if (entry) |
| 277 work_item->set_owner_entry(entry); |
| 278 |
| 279 PostWorkItem(work_item); |
| 280 } |
| 281 |
// Queues an asynchronous write of |buffer_len| bytes from |buffer| at
// |address| + |offset|. A zero-length write is a no-op and must not carry a
// callback.
void BackendImplV3::WriteData(EntryImplV3* entry, Addr address, int offset,
                              net::IOBuffer* buffer, int buffer_len,
                              const CompletionCallback& callback) {
  if (!buffer_len) {
    DCHECK(callback.is_null());
    return;
  }
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_WRITE_DATA);
  work_item->set_buffer(buffer);
  work_item->set_buffer_len(buffer_len);
  work_item->set_address(address);
  work_item->set_offset(offset);
  work_item->set_user_callback(callback);
  work_item->set_owner_entry(entry);
  PostWorkItem(work_item);
}
| 298 |
// Queues an asynchronous copy of |len| bytes from |source| to |destination|.
void BackendImplV3::MoveData(EntryImplV3* entry, Addr source,
                             Addr destination, int len,
                             const CompletionCallback& callback) {
  DCHECK(len);
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_MOVE_DATA);
  work_item->set_buffer_len(len);
  work_item->set_address(source);
  work_item->set_address2(destination);
  work_item->set_user_callback(callback);
  work_item->set_owner_entry(entry);
  // NOTE(review): original note said "+delete source" — presumably the source
  // block should also be deleted once the move completes; confirm and
  // implement or remove.
  PostWorkItem(work_item);
}
| 311 |
// Queues an asynchronous truncation of the data at |address| to |offset|.
void BackendImplV3::Truncate(EntryImplV3* entry, Addr address, int offset) {
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_TRUNCATE);
  work_item->set_address(address);
  work_item->set_offset(offset);
  work_item->set_owner_entry(entry);
  PostWorkItem(work_item);
}
| 319 |
// Deletes the data at |address|. For external files a WORK_DELETE item is
// posted and then the companion block is processed; block-file data is
// zero-filled before the block is released.
void BackendImplV3::Delete(EntryImplV3* entry, Addr address) {
  if (disabled_)
    return;
  if (address.is_separate_file()) {
    scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_DELETE);
    work_item->set_address(address);
    work_item->set_owner_entry(entry);
    PostWorkItem(work_item);

    // And now delete the block itself.
    address = address.AsBlockFile();
  }

  // Overwrite the block with zeros before releasing it.
  int size = Addr::BlockSizeForFileType(address.file_type());
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
  memset(buffer->data(), 0, size);
  WriteData(entry, address, 0, buffer, size, net::CompletionCallback());

  block_files_.DeleteBlock(address);
}
| 340 |
// Queues an asynchronous close of the resources backing |address|.
void BackendImplV3::Close(EntryImplV3* entry, Addr address) {
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLOSE);
  work_item->set_address(address);
  work_item->set_owner_entry(entry);
  PostWorkItem(work_item);
}
| 347 |
// Starts eviction of the entry identified by |hash| / |address|. Returns
// false when the entry is open, its cell is invalid/not USED, or a needed
// block/cell cannot be allocated. With LRU eviction the entry is simply
// removed; with V2 eviction it is first copied to an "evicted" record.
bool BackendImplV3::EvictEntry(uint32 hash, Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    return false;  // Never evict an entry somebody is using.

  EntryCell old_cell = index_.FindEntryCell(hash, address);
  if (!old_cell.IsValid() || old_cell.GetState() != ENTRY_USED)
    return false;

  EntrySet entries;
  entries.cells.push_back(old_cell);

  uint32 flags = WorkItem::WORK_FOR_EVICT;
  if (lru_eviction_) {
    flags |= WorkItem::WORK_NO_COPY;
  } else {
    // V2 eviction keeps a small record of the evicted entry.
    Addr new_address;
    if (!block_files_.CreateBlock(BLOCK_EVICTED, 1, &new_address))
      return false;

    EntryCell new_cell = index_.CreateEntryCell(hash, new_address);
    if (!new_cell.IsValid()) {
      block_files_.DeleteBlock(new_address);  // Undo the allocation.
      return false;
    }
    entries.cells.push_back(new_cell);
  }

  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
  work_item->set_flags(flags);
  work_item->set_entries(entries);
  PostWorkItem(work_item);

  return true;
}
| 383 |
| 384 EntryImplV3* BackendImplV3::GetOpenEntry(Addr address) const { |
| 385 EntriesMap::const_iterator it = open_entries_.find(address.value()); |
| 386 if (it != open_entries_.end()) { |
| 387 // We have this entry in memory. |
| 388 it->second->AddRef(); |
| 389 it->second->OnOpenEntry(); |
| 390 return it->second; |
| 391 } |
| 392 |
| 393 return NULL; |
| 394 } |
| 395 |
// A single stream may use at most 1/8 of the total cache size.
int BackendImplV3::MaxFileSize() const {
  return max_size_ / 8;
}
| 399 |
| 400 void BackendImplV3::ModifyStorageSize(int32 old_size, int32 new_size) { |
| 401 if (disabled_ || old_size == new_size) |
| 402 return; |
| 403 if (old_size > new_size) |
| 404 SubstractStorageSize(old_size - new_size); |
| 405 else |
| 406 AddStorageSize(new_size - old_size); |
| 407 |
| 408 // Update the usage statistics. |
| 409 stats_.ModifyStorageStats(old_size, new_size); |
| 410 } |
| 411 |
// Records an allocation request that could not be satisfied.
void BackendImplV3::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}
| 415 |
| 416 bool BackendImplV3::IsAllocAllowed(int current_size, int new_size, bool force) { |
| 417 DCHECK_GT(new_size, current_size); |
| 418 if (!force && (user_flags_ & NO_BUFFERING)) |
| 419 return false; |
| 420 |
| 421 int to_add = new_size - current_size; |
| 422 if (!force && (buffer_bytes_ + to_add > MaxBuffersSize())) |
| 423 return false; |
| 424 |
| 425 buffer_bytes_ += to_add; |
| 426 CACHE_UMA(COUNTS_50000, "BufferBytes", buffer_bytes_ / 1024); |
| 427 return true; |
| 428 } |
| 429 |
// Returns |size| bytes to the buffer budget tracked by IsAllocAllowed().
void BackendImplV3::BufferDeleted(int size) {
  DCHECK_GE(size, 0);
  buffer_bytes_ -= size;
  DCHECK_GE(buffer_bytes_, 0);
}
| 435 |
| 436 bool BackendImplV3::IsLoaded() const { |
| 437 if (user_flags_ & NO_LOAD_PROTECTION) |
| 438 return false; |
| 439 |
| 440 return user_load_; |
| 441 } |
| 442 |
| 443 base::Time BackendImplV3::GetTime() const { |
| 444 Time base_time = Time::Now(); |
| 445 if (!test_seconds_) |
| 446 return base_time; |
| 447 |
| 448 return base_time + TimeDelta::FromSeconds(test_seconds_); |
| 449 } |
| 450 |
// Builds the UMA histogram name for this cache type, e.g. "DiskCache3.Foo_Http".
// |names| is indexed by cache_type_; MEMORY_CACHE never reaches this backend.
std::string BackendImplV3::HistogramName(const char* name) const {
  static const char* names[] = { "Http", "", "Media", "AppCache", "Shader" };
  DCHECK_NE(cache_type_, net::MEMORY_CACHE);
  return base::StringPrintf("DiskCache3.%s_%s", name, names[cache_type_]);
}
| 456 |
// Returns a weak pointer that is invalidated when this backend is destroyed.
base::WeakPtr<BackendImplV3> BackendImplV3::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}
| 460 |
// We want to remove biases from some histograms so we only send data once per
// week. The decision is computed once and cached in |uma_report_|
// (1 = checked/no, 2 = checked/yes).
bool BackendImplV3::ShouldReportAgain() {
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (GetTime() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, GetTime().ToInternalValue());
    uma_report_++;
    return true;
  }
  return false;
}
| 477 |
// Reports one-time histograms the first time the cache fills up and eviction
// starts, then resets the hit-ratio counters.
void BackendImplV3::FirstEviction() {
  IndexHeaderV3* header = index_.header();
  header->flags |= CACHE_EVICTED;
  DCHECK(header->create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.

  Time create_time = Time::FromInternalValue(header->create_time);
  CACHE_UMA(AGE, "FillupAge", create_time);

  // TIMER counts 30-second ticks, so ticks / 120 converts to hours.
  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;  // Avoid dividing by zero below.
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate",
            static_cast<int>(header->num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate",
            static_cast<int>((header->num_bytes / 1024) / use_time));

  int avg_size = header->num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / header->num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", large_ratio);

  // Per-group ratios only exist with V2 eviction.
  if (!lru_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio",
              header->num_no_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio",
              header->num_low_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio",
              header->num_high_use_entries * 100 / header->num_entries);
  }

  stats_.ResetRatios();
}
| 518 |
// Forwards a statistics event to the stats tracker.
void BackendImplV3::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}
| 522 |
// Accumulates read traffic for the current timer interval, saturating at
// kint32max.
// NOTE(review): the overflow check relies on the sum wrapping negative;
// signed overflow is technically undefined behavior — confirm acceptable.
void BackendImplV3::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}
| 529 |
// Accumulates write traffic; reads and writes share one counter.
void BackendImplV3::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}
| 534 |
// Posts a one-shot request to grow the block files; |growing_files_| keeps
// multiple requests from piling up (mirrors GrowIndex()).
void BackendImplV3::GrowBlockFiles() {
  if (growing_files_ || disabled_)
    return;
  growing_files_ = true;
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_GROW_FILES);
  PostWorkItem(work_item);
}
| 542 |
// Periodic housekeeping, run every kTimerSeconds (30 s): sampled statistics,
// load detection, index backup, deferred entry cleanup, and — every 10 ticks
// (5 minutes) — persisting the stats.
void BackendImplV3::OnTimerTick() {
  if (disabled_)
    return;

  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", num_refs_);

  CACHE_UMA(COUNTS_10000, "EntryAccessRate", entry_count_);
  CACHE_UMA(COUNTS, "ByteIORate", byte_count_ / 1024);

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  index_.OnBackupTimer();
  CloseDoomedEntries();
  ReleaseRecentEntries();
  UpdateDeletedEntries();

  // Save stats to disk at 5 min intervals.
  if (time % 10 == 0)
    StoreStats();
}
| 588 |
// Flags the backend as running under a unit test.
void BackendImplV3::SetUnitTestMode() {
  user_flags_ |= UNIT_TEST_MODE;
}
| 592 |
// Puts the backend in read-only upgrade mode (used while migrating formats).
void BackendImplV3::SetUpgradeMode() {
  user_flags_ |= UPGRADE_MODE;
  read_only_ = true;
}
| 597 |
// Switches from LRU to the V2 eviction algorithm.
void BackendImplV3::SetNewEviction() {
  user_flags_ |= EVICTION_V2;
  lru_eviction_ = false;
}
| 602 |
// ORs additional user flags into the backend configuration.
void BackendImplV3::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}
| 606 |
// Posts a no-op work item so |callback| runs after everything queued so far.
int BackendImplV3::FlushQueueForTest(const CompletionCallback& callback) {
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_NONE);
  work_item->set_user_callback(callback);
  PostWorkItem(work_item);
  return net::ERR_IO_PENDING;
}
| 613 |
// Test-only teardown: flushes deferred entry work, backs up the index, posts
// the final cleanup item and then disables the backend. The order matters —
// pending entry state must be flushed before WORK_CLEANUP is queued.
int BackendImplV3::CleanupForTest(const CompletionCallback& callback) {
  CloseDoomedEntries();
  ReleaseRecentEntries();
  UpdateDeletedEntries();
  index_.OnBackupTimer();
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLEANUP);
  work_item->set_user_callback(callback);
  PostWorkItem(work_item);
  worker_ = NULL;
  init_ = false;
  disabled_ = true;
  index_.Reset();
  return net::ERR_IO_PENDING;
}
| 628 |
| 629 void BackendImplV3::TrimForTest(bool empty) { |
| 630 eviction_.SetTestMode(); |
| 631 if (empty) |
| 632 eviction_.TrimAllCache(CompletionCallback()); |
| 633 else |
| 634 eviction_.TrimCache(); |
| 635 } |
| 636 |
// Test helper: trims the deleted-entries list (completely when |empty|).
void BackendImplV3::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}
| 641 |
| 642 void BackendImplV3::AddDelayForTest(int seconds) { |
| 643 Trace("Add %d deconds", seconds); |
| 644 int old_timers = test_seconds_ / kTimerSeconds; |
| 645 test_seconds_ += seconds; |
| 646 if (old_timers != test_seconds_ / kTimerSeconds) |
| 647 OnTimerTick(); |
| 648 } |
| 649 |
// Test helper: if the entry for |key| is open, arranges for |callback| to run
// when the entry object is destroyed; returns OK immediately when there is
// nothing to wait for.
int BackendImplV3::WaitForEntryToCloseForTest(
    const std::string& key,
    const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (disabled_ || key.empty())
    return net::ERR_FAILED;

  uint32 hash = base::Hash(key);

  EntrySet entries = index_.LookupEntries(hash);
  if (!entries.cells.size())
    return net::OK;  // No cells for this hash at all.

  // All cells already evicted: nothing is open.
  if (entries.cells.size() == static_cast<size_t>(entries.evicted_count))
    return net::OK;

  EntryImplV3* open_entry = LookupOpenEntry(entries, key);
  if (open_entry) {
    open_entry->NotifyDestructionForTest(callback);
    open_entry->Close();
    return net::ERR_IO_PENDING;
  }

  return net::OK;
}
| 675 |
// Consistency check; returns a negative error code on failure. The rankings
// verification from the V2 backend is not ported yet, hence the disabled
// block below.
int BackendImplV3::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  /*int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != index_.header()->num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }*/

  return CheckAllEntries();
}
| 699 |
// Posts a one-shot request to grow the index table; |growing_index_| keeps
// multiple requests from piling up (mirrors GrowBlockFiles()).
void BackendImplV3::GrowIndex() {
  if (growing_index_ || disabled_)
    return;
  growing_index_ = true;
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_GROW_INDEX);
  PostWorkItem(work_item);
}
| 707 |
// Queues an asynchronous write of the index contents in |buffer| to disk.
void BackendImplV3::SaveIndex(net::IOBuffer* buffer, int buffer_len) {
  if (disabled_ || !buffer_len)
    return;

  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_WRITE_INDEX);
  work_item->set_buffer(buffer);
  work_item->set_buffer_len(buffer_len);
  work_item->set_offset(0);
  PostWorkItem(work_item);
}
| 718 |
// Not implemented yet; reaching this is a bug.
void BackendImplV3::DeleteCell(EntryCell cell) {
  NOTREACHED();
  // Post task to delete this cell.
  // Look at a local map of cells being deleted.
}
| 724 |
// Not implemented yet; reaching this is a bug.
void BackendImplV3::FixCell(EntryCell cell) {
  NOTREACHED();
}
| 728 |
| 729 // ------------------------------------------------------------------------ |
| 730 |
// Backend interface: reports the configured cache type.
net::CacheType BackendImplV3::GetCacheType() const {
  return cache_type_;
}
| 734 |
// Backend interface: number of entries currently stored (0 when disabled).
int32 BackendImplV3::GetEntryCount() const {
  if (disabled_)
    return 0;
  DCHECK(init_);
  return index_.header()->num_entries;
}
| 741 |
// Backend interface: opens the entry for |key|. Returns OK synchronously when
// the entry is already in memory, ERR_IO_PENDING when it must be read from
// disk, and ERR_FAILED when it does not exist (or only evicted records
// remain).
int BackendImplV3::OpenEntry(const std::string& key, Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (disabled_ || key.empty())
    return net::ERR_FAILED;

  uint32 hash = base::Hash(key);
  Trace("Open hash 0x%x", hash);

  EntrySet entries = index_.LookupEntries(hash);
  if (!entries.cells.size())
    return net::ERR_FAILED;

  // Only evicted records remain for this hash.
  if (entries.cells.size() == static_cast<size_t>(entries.evicted_count))
    return net::ERR_FAILED;

  EntryImplV3* open_entry = LookupOpenEntry(entries, key);
  if (open_entry) {
    *entry = open_entry;
    eviction_.OnOpenEntry(open_entry);
    entry_count_++;

    Trace("Open hash 0x%x end: 0x%x", hash, open_entry->GetAddress().value());
    stats_.OnEvent(Stats::OPEN_HIT);
    SIMPLE_STATS_COUNTER("disk_cache.hit");
    return net::OK;
  }

  // Read the entry from disk.
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
  work_item->set_entries(entries);
  work_item->set_user_callback(callback);
  work_item->set_key(key);
  work_item->set_entry_buffer(entry);
  PostWorkItem(work_item);

  return net::ERR_IO_PENDING;
}
| 780 |
// Backend interface: creates the entry for |key|. Fails when a live entry
// with the same hash exists; when only deleted records remain they are read
// back for possible resurrection.
int BackendImplV3::CreateEntry(const std::string& key, Entry** entry,
                               const CompletionCallback& callback) {
  DCHECK(init_);
  DCHECK(!callback.is_null());
  if (disabled_ || key.empty() || key.size() > kMaxKeySize)
    return net::ERR_FAILED;

  uint32 hash = base::Hash(key);
  Trace("Create hash 0x%x", hash);

  EntrySet entries = index_.LookupEntries(hash);
  if (entries.cells.size()) {
    if (entries.cells.size() != static_cast<size_t>(entries.evicted_count)) {
      // A live entry with this hash exists, but it could be a hash collision
      // with a different key. TODO: post a work item to compare the stored
      // key, and keep a collision-specific map.
      return net::ERR_FAILED;
    }

    // On the other hand, we have only deleted items that we may resurrect.
    // Read the entry from disk.
    scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
    work_item->set_flags(WorkItem::WORK_FOR_RESURRECT);
    work_item->set_entries(entries);
    work_item->set_user_callback(callback);
    work_item->set_key(key);
    work_item->set_entry_buffer(entry);
    PostWorkItem(work_item);

    return net::ERR_IO_PENDING;
  }
  return OnCreateEntryComplete(key, hash, NULL, entry, callback);
}
| 813 |
// Backend interface: dooms the entry for |key|. Handles an already-open entry
// synchronously; otherwise reads it from disk with WORK_FOR_DOOM.
int BackendImplV3::DoomEntry(const std::string& key,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (disabled_ || key.empty())
    return net::ERR_FAILED;

  uint32 hash = base::Hash(key);
  Trace("DoomEntry hash 0x%x", hash);

  EntrySet entries = index_.LookupEntries(hash);
  if (!entries.cells.size())
    return net::ERR_FAILED;

  // Only evicted records remain for this hash.
  if (entries.cells.size() == static_cast<size_t>(entries.evicted_count))
    return net::ERR_FAILED;

  EntryImplV3* open_entry = LookupOpenEntry(entries, key);
  if (open_entry) {
    open_entry->Doom();
    open_entry->Close();
    return net::OK;
  }

  // Read the entry from disk.
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
  work_item->set_flags(WorkItem::WORK_FOR_DOOM);
  work_item->set_entries(entries);
  work_item->set_user_callback(callback);
  work_item->set_key(key);
  PostWorkItem(work_item);

  return net::ERR_IO_PENDING;
}
| 847 |
// Backend interface: removes every entry. With no open references the whole
// cache is restarted; otherwise the eviction code trims everything.
int BackendImplV3::DoomAllEntries(const CompletionCallback& callback) {
  if (disabled_)
    return net::ERR_FAILED;

  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(callback);
    return init_ ? net::OK : net::ERR_IO_PENDING;
  }
  return eviction_.TrimAllCache(callback);
}
| 861 |
// Backend interface: dooms entries used within [initial_time, end_time] by
// iterating backwards through the index from end_time.
int BackendImplV3::DoomEntriesBetween(base::Time initial_time,
                                      base::Time end_time,
                                      const CompletionCallback& callback) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  Time now = GetTime();
  if (end_time.is_null() || end_time > now)
    end_time = now;  // Clamp an open-ended or future range to "now".

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  // Iterate backwards in time, starting just after end_time.
  scoped_ptr<IndexIterator> to_delete(new IndexIterator);
  to_delete->forward = false;
  to_delete->timestamp = index_.CalculateTimestamp(end_time) + 1;

  // Prepare to read the first entry from disk.
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
  work_item->set_flags(WorkItem::WORK_FOR_ITERATION |
                       WorkItem::WORK_FOR_DOOM_RANGE);
  work_item->set_initial_time(initial_time);
  work_item->set_end_time(end_time);
  work_item->set_iterator(to_delete.Pass());

  // Nothing pending means the whole range was handled synchronously.
  if (OpenNext(work_item) != net::ERR_IO_PENDING)
    return net::OK;

  work_item->set_user_callback(callback);
  return net::ERR_IO_PENDING;
}
| 893 |
// Backend interface: dooms entries used since |initial_time| (through "now").
int BackendImplV3::DoomEntriesSince(base::Time initial_time,
                                    const CompletionCallback& callback) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  return DoomEntriesBetween(initial_time, GetTime(), callback);
}
| 899 |
// Forward enumeration step; delegates to OpenFollowingEntry().
int BackendImplV3::OpenNextEntry(void** iter, Entry** next_entry,
                                 const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  return OpenFollowingEntry(false, iter, next_entry, callback);
}
| 905 |
| 906 void BackendImplV3::EndEnumeration(void** iter) { |
| 907 scoped_ptr<IndexIterator> iterator( |
| 908 reinterpret_cast<IndexIterator*>(*iter)); |
| 909 *iter = NULL; |
| 910 } |
| 911 |
| 912 void BackendImplV3::GetStats(StatsItems* stats) { |
| 913 if (disabled_) |
| 914 return; |
| 915 |
| 916 std::pair<std::string, std::string> item; |
| 917 |
| 918 item.first = "Entries"; |
| 919 item.second = base::StringPrintf("%d", index_.header()->num_entries); |
| 920 stats->push_back(item); |
| 921 |
| 922 item.first = "Max size"; |
| 923 item.second = base::StringPrintf("%d", max_size_); |
| 924 stats->push_back(item); |
| 925 |
| 926 item.first = "Current size"; |
| 927 item.second = base::StringPrintf("%d", index_.header()->num_bytes); |
| 928 stats->push_back(item); |
| 929 |
| 930 stats_.GetItems(stats); |
| 931 } |
| 932 |
| 933 void BackendImplV3::OnExternalCacheHit(const std::string& key) { |
| 934 if (disabled_ || key.empty()) |
| 935 return; |
| 936 |
| 937 uint32 hash = base::Hash(key); |
| 938 EntrySet entries = index_.LookupEntries(hash); |
| 939 if (!entries.cells.size()) |
| 940 return; |
| 941 |
| 942 if (entries.cells.size() == static_cast<size_t>(entries.evicted_count)) |
| 943 return; |
| 944 |
| 945 for (size_t i = 0; i < entries.cells.size(); i++) { |
| 946 if (entries.cells[i].GetGroup() == ENTRY_EVICTED) |
| 947 continue; |
| 948 |
| 949 index_.UpdateTime(hash, entries.cells[i].GetAddress(), GetTime()); |
| 950 } |
| 951 |
| 952 EntryImplV3* open_entry = LookupOpenEntry(entries, key); |
| 953 if (open_entry) { |
| 954 eviction_.OnOpenEntry(open_entry); |
| 955 entry_count_++; |
| 956 UpdateRank(open_entry, true); |
| 957 open_entry->Close(); |
| 958 return; |
| 959 } |
| 960 |
| 961 if (user_flags_ & UNIT_TEST_MODE) { |
| 962 for (size_t i = 0; i < entries.cells.size(); i++) { |
| 963 // This method doesn't have a callback, and it may take a while for the |
| 964 // operation to complete so update the time of any entry with this hash. |
| 965 if (entries.cells[i].GetGroup() != ENTRY_EVICTED) { |
| 966 index_.UpdateTime(hash, entries.cells[i].GetAddress(), |
| 967 GetTime()); |
| 968 } |
| 969 } |
| 970 } |
| 971 |
| 972 // Read the entry from disk. |
| 973 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY); |
| 974 work_item->set_flags(WorkItem::WORK_FOR_UPDATE); |
| 975 work_item->set_entries(entries); |
| 976 work_item->set_key(key); |
| 977 PostWorkItem(work_item); |
| 978 } |
| 979 |
| 980 // ------------------------------------------------------------------------ |
| 981 |
| 982 // The maximum cache size will be either set explicitly by the caller, or |
| 983 // calculated by this code. |
| 984 void BackendImplV3::AdjustMaxCacheSize() { |
| 985 if (max_size_) |
| 986 return; |
| 987 |
| 988 // The user is not setting the size, let's figure it out. |
| 989 int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_); |
| 990 if (available < 0) { |
| 991 max_size_ = kDefaultCacheSize; |
| 992 return; |
| 993 } |
| 994 |
| 995 available += index_.header()->num_bytes; |
| 996 |
| 997 max_size_ = PreferedCacheSize(available); |
| 998 |
| 999 // Let's not use more than the default size while we tune-up the performance |
| 1000 // of bigger caches. TODO(rvargas): remove this limit. |
| 1001 if (max_size_ > kDefaultCacheSize * 4) |
| 1002 max_size_ = kDefaultCacheSize * 4; |
| 1003 } |
| 1004 |
| 1005 bool BackendImplV3::InitStats(void* stats_data) { |
| 1006 Addr address(index_.header()->stats); |
| 1007 int size = stats_.StorageSize(); |
| 1008 |
| 1009 if (!address.is_initialized()) { |
| 1010 FileType file_type = Addr::RequiredFileType(size); |
| 1011 DCHECK_NE(file_type, EXTERNAL); |
| 1012 int num_blocks = Addr::RequiredBlocks(size, file_type); |
| 1013 |
| 1014 if (!CreateBlock(file_type, num_blocks, &address)) |
| 1015 return false; |
| 1016 return stats_.Init(NULL, 0, address); |
| 1017 } |
| 1018 |
| 1019 // Load the required data. |
| 1020 DCHECK(address.is_block_file()); |
| 1021 size = address.num_blocks() * address.BlockSize(); |
| 1022 |
| 1023 if (!stats_.Init(stats_data, size, address)) |
| 1024 return false; |
| 1025 if (cache_type_ == net::DISK_CACHE && ShouldReportAgain()) |
| 1026 stats_.InitSizeHistogram(); |
| 1027 return true; |
| 1028 } |
| 1029 |
| 1030 void BackendImplV3::StoreStats() { |
| 1031 int size = stats_.StorageSize(); |
| 1032 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size)); |
| 1033 Addr address; |
| 1034 size = stats_.SerializeStats(buffer->data(), size, &address); |
| 1035 DCHECK(size); |
| 1036 if (!address.is_initialized()) |
| 1037 return; |
| 1038 |
| 1039 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_WRITE_DATA); |
| 1040 work_item->set_buffer(buffer); |
| 1041 work_item->set_buffer_len(size); |
| 1042 work_item->set_address(address); |
| 1043 work_item->set_offset(0); |
| 1044 PostWorkItem(work_item); |
| 1045 } |
| 1046 |
| 1047 void BackendImplV3::RestartCache(const CompletionCallback& callback) { |
| 1048 PrepareForRestart(); |
| 1049 |
| 1050 // Don't call Init() if directed by the unit test: we are simulating a failure |
| 1051 // trying to re-enable the cache. |
| 1052 if (user_flags_ & UNIT_TEST_MODE) { |
| 1053 init_ = true; // Let the destructor do proper cleanup. |
| 1054 } else { |
| 1055 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_RESTART); |
| 1056 work_item->set_user_callback(callback); |
| 1057 work_item->set_flags(user_flags_); |
| 1058 PostWorkItem(work_item); |
| 1059 } |
| 1060 } |
| 1061 |
// Tears down the in-memory state ahead of a restart. Note that the statement
// order matters: the crash marker is cleared through the live header before
// the index is reset.
void BackendImplV3::PrepareForRestart() {
  // Unless the v2-eviction experiment was requested, fall back to plain LRU.
  if (!(user_flags_ & EVICTION_V2))
    lru_eviction_ = true;

  disabled_ = true;
  // Presumably so the upcoming init is not treated as crash recovery --
  // confirm against the |crash| handling during initialization.
  index_.header()->crash = 0;
  block_files_.Clear();
  index_.Reset();
  init_ = false;
  restarted_ = true;
}
| 1073 |
// Shuts the backend down. Statement order matters: bookkeeping is flushed
// while the worker is still alive, then the worker is released.
void BackendImplV3::CleanupCache() {
  Trace("Backend Cleanup");
  //eviction_.Stop();
  timer_.reset();

  if (init_) {
    if (!(user_flags_ & NO_CLEAN_ON_EXIT)) {
      // Orderly shutdown: persist stats and per-entry state, and back up the
      // index, before tearing everything down.
      StoreStats();
      CloseDoomedEntries();
      ReleaseRecentEntries();
      UpdateDeletedEntries();
      index_.OnBackupTimer();
    }
    // Give the worker a final cleanup task; after this no more work is posted.
    scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLEANUP);
    PostWorkItem(work_item);
    worker_ = NULL;
  }
  // Cancel any completion callbacks still in flight back to this object.
  ptr_factory_.InvalidateWeakPtrs();
}
| 1093 |
// Materializes the entry referenced by the current cell of |work_item| as an
// in-memory EntryImplV3, after re-validating the cell against the index.
// Returns 0 on success (with *entry holding a reference for the caller) or an
// ERR_* code from errors.h on failure.
int BackendImplV3::NewEntry(WorkItem* work_item, EntryImplV3** entry) {
  Addr address =
      work_item->entries()->cells[work_item->entries()->current].GetAddress();

  // The entry could have been opened since this task was posted to the cache
  // thread, so let's check again.
  EntryImplV3* this_entry = GetOpenEntry(address);
  if (this_entry) {
    // Easy job. This entry is already in memory.
    *entry = this_entry;
    return 0;
  }

  // Even if the entry is not in memory right now, it could have changed. Note
  // that any state other than USED means we are either deleting this entry or
  // it should be in memory.
  uint32 hash =
      work_item->entries()->cells[work_item->entries()->current].hash();
  EntryCell cell = index_.FindEntryCell(hash, address);
  if (!cell.IsValid() || cell.GetState() != ENTRY_USED)
    return ERR_INVALID_ENTRY;

  STRESS_DCHECK(block_files_.IsValid(address));

  if (!address.SanityCheckForEntryV3()) {
    LOG(WARNING) << "Wrong entry address.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ADDRESS;
  }

  // Evicted entries only carry the short record; live ones the full record.
  scoped_refptr<EntryImplV3> cache_entry;
  if (address.file_type() == BLOCK_EVICTED) {
    cache_entry = new EntryImplV3(this, address, work_item->key(),
                                  work_item->short_entry_record().Pass());
  } else {
    cache_entry = new EntryImplV3(this, address, work_item->key(),
                                  work_item->entry_record().Pass());
  }
  IncreaseNumRefs();
  *entry = NULL;

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ENTRY;
  }

  STRESS_DCHECK(block_files_.IsValid(
      Addr(cache_entry->entry()->Data()->rankings_node)));

  if (!cache_entry->DataSanityCheck()) {
    // TODO(review): should this share the failure path above? Make sure the
    // cell is deleted in the first case, and as much data as possible is
    // deleted here.
    LOG(WARNING) << "Messed up entry found.";
    cache_entry->FixForDelete();
  }

  open_entries_[address.value()] = cache_entry;
  index_.SetSate(cache_entry->GetHash(), address, ENTRY_OPEN);

  cache_entry->BeginLogging(net_log_, false);
  cache_entry->OnOpenEntry();
  // Transfer our reference on the entry to the caller.
  cache_entry.swap(entry);
  return 0;
}
| 1158 |
| 1159 EntryImplV3* BackendImplV3::LookupOpenEntry(const EntrySet& entries, |
| 1160 const std::string key) { |
| 1161 for (size_t i = 0; i < entries.cells.size(); i++) { |
| 1162 if (entries.cells[i].GetGroup() == ENTRY_EVICTED) |
| 1163 continue; |
| 1164 |
| 1165 EntryImplV3* this_entry = GetOpenEntry(entries.cells[i].GetAddress()); |
| 1166 if (this_entry && this_entry->GetKey() == key) |
| 1167 return this_entry; |
| 1168 } |
| 1169 return NULL; |
| 1170 } |
| 1171 |
| 1172 // This is the actual implementation for OpenNextEntry and OpenPrevEntry. |
| 1173 int BackendImplV3::OpenFollowingEntry(bool forward, void** iter, |
| 1174 Entry** next_entry, |
| 1175 const CompletionCallback& callback) { |
| 1176 if (disabled_) |
| 1177 return net::ERR_FAILED; |
| 1178 |
| 1179 DCHECK(iter); |
| 1180 |
| 1181 scoped_ptr<IndexIterator> iterator( |
| 1182 reinterpret_cast<IndexIterator*>(*iter)); |
| 1183 *iter = NULL; |
| 1184 |
| 1185 if (!iterator.get()) { |
| 1186 iterator.reset(new IndexIterator); |
| 1187 iterator->timestamp = index_.CalculateTimestamp(GetTime()) + 1; |
| 1188 iterator->forward = forward; |
| 1189 } |
| 1190 |
| 1191 // Prepare to read the first entry from disk. |
| 1192 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY); |
| 1193 work_item->set_flags(WorkItem::WORK_FOR_ITERATION); |
| 1194 work_item->set_iterator(iterator.Pass()); |
| 1195 work_item->set_iter_buffer(iter); |
| 1196 work_item->set_entry_buffer(next_entry); |
| 1197 |
| 1198 int rv = OpenNext(work_item); |
| 1199 if (rv == net::ERR_IO_PENDING) |
| 1200 work_item->set_user_callback(callback); |
| 1201 |
| 1202 return rv; |
| 1203 } |
| 1204 |
| 1205 bool BackendImplV3::GetMoreCells(WorkItem* work_item) { |
| 1206 DCHECK(work_item->flags() & WorkItem::WORK_FOR_ITERATION); |
| 1207 IndexIterator* iterator = work_item->iterator(); |
| 1208 |
| 1209 if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) { |
| 1210 int lower_limit = index_.CalculateTimestamp(work_item->initial_time()); |
| 1211 if (iterator->timestamp <= lower_limit || |
| 1212 !index_.GetNextCells(iterator)) { |
| 1213 return false; |
| 1214 } |
| 1215 return true; |
| 1216 } |
| 1217 |
| 1218 return index_.GetNextCells(iterator); |
| 1219 } |
| 1220 |
// Advances an iteration work item: consumes cells already fetched, refills
// the list from the index when it runs dry, and only goes to disk for entries
// not currently in memory. Returns net::OK when an in-memory entry was
// delivered, net::ERR_FAILED when the iteration ended (the doom-range case
// still reports success through UpdateIterator), or net::ERR_IO_PENDING when
// a disk read was posted.
int BackendImplV3::OpenNext(WorkItem* work_item) {
  Trace("OpenNext work item 0x%p", work_item);
  CellList* cells = &work_item->iterator()->cells;
  EntrySet entries;
  for (;;) {
    if (cells->empty()) {
      // Out of local cells; fetch the next batch.
      if (!GetMoreCells(work_item)) {
        UpdateIterator(NULL, work_item);
        return net::ERR_FAILED;
      }
      DCHECK(!cells->empty());
    }

    while (!cells->empty()) {
      // Re-validate the cell; it may have gone away since it was listed.
      EntryCell last_cell = index_.FindEntryCell(cells->back().hash,
                                                 cells->back().address);
      cells->pop_back();
      if (!last_cell.IsValid())
        continue;

      entries.cells.push_back(last_cell);

      // See if the entry is currently open.
      EntryImplV3* this_entry = GetOpenEntry(last_cell.GetAddress());
      if (this_entry) {
        if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) {
          // Doom synchronously and keep scanning the list.
          Doom(this_entry, work_item);
          continue;
        } else {
          UpdateIterator(this_entry, work_item);
          return net::OK;
        }
      }

      // Not in memory: go to disk for the gathered candidate cells.
      work_item->set_entries(entries);
      PostWorkItem(work_item);
      return net::ERR_IO_PENDING;
    }
  }
}
| 1261 |
| 1262 void BackendImplV3::Doom(EntryImplV3* entry, WorkItem* work_item) { |
| 1263 if (entry->GetLastUsed() >= work_item->initial_time() && |
| 1264 entry->GetLastUsed() < work_item->end_time()) { |
| 1265 Trace("Doom 0x%p work item 0x%p", entry, work_item); |
| 1266 entry->Doom(); |
| 1267 } |
| 1268 entry->Close(); |
| 1269 } |
| 1270 |
| 1271 void BackendImplV3::UpdateIterator(EntryImplV3* entry, WorkItem* work_item) { |
| 1272 int result; |
| 1273 if (entry) { |
| 1274 result = net::OK; |
| 1275 *work_item->iter_buffer() = work_item->ReleaseIterator(); |
| 1276 *work_item->entry_buffer() = entry; |
| 1277 } else if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) { |
| 1278 result = net::OK; |
| 1279 } else { |
| 1280 result = net::ERR_FAILED; |
| 1281 *work_item->iter_buffer() = NULL; |
| 1282 *work_item->entry_buffer() = entry; |
| 1283 } |
| 1284 |
| 1285 if (!work_item->user_callback().is_null()) |
| 1286 work_item->user_callback().Run(result); |
| 1287 } |
| 1288 |
| 1289 void BackendImplV3::CloseDoomedEntries() { |
| 1290 // Copy the current map to make sure no new entries are deleted. |
| 1291 EntriesMap to_delete(entries_to_delete_); |
| 1292 for (EntriesMap::iterator it = to_delete.begin(); |
| 1293 it != to_delete.end(); ++it) { |
| 1294 it->second->Close(); |
| 1295 } |
| 1296 } |
| 1297 |
| 1298 void BackendImplV3::ReleaseRecentEntries() { |
| 1299 for (EntriesSet::iterator it = recent_entries_.begin(); |
| 1300 it != recent_entries_.end(); ++it) { |
| 1301 (*it)->Release(); |
| 1302 } |
| 1303 recent_entries_.clear(); |
| 1304 } |
| 1305 |
| 1306 void BackendImplV3::UpdateDeletedEntries() { |
| 1307 for (size_t i = 0; i < deleted_entries_.size(); i++) { |
| 1308 CellInfo& cell_info = deleted_entries_[i]; |
| 1309 index_.SetSate(cell_info.hash, cell_info.address, ENTRY_FREE); |
| 1310 } |
| 1311 deleted_entries_.clear(); |
| 1312 } |
| 1313 |
| 1314 void BackendImplV3::AddStorageSize(int32 bytes) { |
| 1315 index_.header()->num_bytes += bytes; |
| 1316 DCHECK_GE(index_.header()->num_bytes, 0); |
| 1317 } |
| 1318 |
| 1319 void BackendImplV3::SubstractStorageSize(int32 bytes) { |
| 1320 index_.header()->num_bytes -= bytes; |
| 1321 DCHECK_GE(index_.header()->num_bytes, 0); |
| 1322 } |
| 1323 |
| 1324 void BackendImplV3::IncreaseNumRefs() { |
| 1325 num_refs_++; |
| 1326 if (max_refs_ < num_refs_) |
| 1327 max_refs_ = num_refs_; |
| 1328 } |
| 1329 |
| 1330 void BackendImplV3::DecreaseNumRefs() { |
| 1331 DCHECK(num_refs_); |
| 1332 num_refs_--; |
| 1333 } |
| 1334 |
| 1335 void BackendImplV3::IncreaseNumEntries() { |
| 1336 index_.header()->num_entries++; |
| 1337 DCHECK_GT(index_.header()->num_entries, 0); |
| 1338 } |
| 1339 |
| 1340 void BackendImplV3::DecreaseNumEntries() { |
| 1341 index_.header()->num_entries--; |
| 1342 if (index_.header()->num_entries < 0) { |
| 1343 NOTREACHED(); |
| 1344 index_.header()->num_entries = 0; |
| 1345 } |
| 1346 } |
| 1347 |
// Sends |work_item| to the worker on the cache thread. The manual AddRef()
// keeps the item alive across the thread hop; OnWorkDone() adopts and
// releases that reference.
void BackendImplV3::PostWorkItem(WorkItem* work_item) {
  // |worker_| is NULL once CleanupCache() ran; silently drop new work then.
  if (!worker_)
    return;
  Trace("Post task 0x%p %d flags 0x%x", work_item, work_item->type(),
        work_item->flags());

  // Long story short: we expect to see the work item back on this thread.
  // If the task is not executed we'll leak work_item, but that should only
  // happen at shutdown.
  work_item->AddRef();
  work_item->set_closure(base::Bind(&BackendImplV3::OnWorkDone,
                                    ptr_factory_.GetWeakPtr()));
  cache_thread_->PostTask(
      FROM_HERE,
      base::Bind(&BackendImplV3::Worker::OnDoWork, worker_, work_item));
}
| 1364 |
// Runs back on this thread when the worker finished |work_item|; dispatches
// to the completion handler matching the item's type.
void BackendImplV3::OnWorkDone(WorkItem* work_item) {
  Trace("Task done 0x%p %d flags 0x%x", work_item, work_item->type(),
        work_item->flags());
  // Balance the reference from PostWorkItem: the swap adopts the raw pointer
  // into a scoped reference without touching the refcount.
  scoped_refptr<WorkItem> my_work_item;
  my_work_item.swap(&work_item);

  if (!worker_) {
    // This may be called after CleanupForTest was called.
    if (!my_work_item->user_callback().is_null())
      my_work_item->user_callback().Run(my_work_item->result());
    return;
  }

  switch (my_work_item->type()) {
    case WorkItem::WORK_INIT: return OnInitComplete(my_work_item);
    case WorkItem::WORK_RESTART: return OnInitComplete(my_work_item);
    case WorkItem::WORK_GROW_INDEX: return OnGrowIndexComplete(my_work_item);
    case WorkItem::WORK_GROW_FILES: return OnGrowFilesComplete(my_work_item);
    case WorkItem::WORK_OPEN_ENTRY: return OnOpenEntryComplete(my_work_item);
    default: return OnOperationComplete(my_work_item);
  }
}
| 1388 |
| 1389 void BackendImplV3::OnInitComplete(WorkItem* work_item) { |
| 1390 int rv = work_item->result(); |
| 1391 if (rv != ERR_NO_ERROR && rv != ERR_CACHE_CREATED && |
| 1392 rv != ERR_PREVIOUS_CRASH) { |
| 1393 ReportError(rv); |
| 1394 return work_item->user_callback().Run(net::ERR_FAILED); |
| 1395 } |
| 1396 |
| 1397 #if defined(NET_BUILD_STRESS_CACHE) |
| 1398 // Start evictions right away. |
| 1399 up_ticks_ = kTrimDelay * 2; |
| 1400 #endif |
| 1401 DCHECK(!init_); |
| 1402 |
| 1403 num_refs_ = max_refs_ = 0; |
| 1404 entry_count_ = byte_count_ = 0; |
| 1405 |
| 1406 if (!restarted_) { |
| 1407 buffer_bytes_ = 0; |
| 1408 trace_object_ = TraceObject::GetTraceObject(); |
| 1409 // Create a recurrent timer of 30 secs (90 minutes for tests). |
| 1410 int timer_delay = user_flags_ & BASIC_UNIT_TEST ? 90 * 60 * 1000 : |
| 1411 kTimerSeconds * 1000; |
| 1412 timer_.reset(new base::RepeatingTimer<BackendImplV3>()); |
| 1413 timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this, |
| 1414 &BackendImplV3::OnTimerTick); |
| 1415 } |
| 1416 Trace("Init"); |
| 1417 init_ = true; |
| 1418 |
| 1419 scoped_ptr<InitResult> result = work_item->init_result(); |
| 1420 index_.Init(&result.get()->index_data); |
| 1421 |
| 1422 if (index_.header()->experiment != NO_EXPERIMENT && |
| 1423 cache_type_ != net::DISK_CACHE) { |
| 1424 // No experiment for other caches. |
| 1425 ReportError(ERR_INIT_FAILED); |
| 1426 return work_item->user_callback().Run(net::ERR_FAILED); |
| 1427 } |
| 1428 |
| 1429 if (!(user_flags_ & BASIC_UNIT_TEST)) { |
| 1430 // The unit test controls directly what to test. |
| 1431 lru_eviction_ = (cache_type_ != net::DISK_CACHE); |
| 1432 } |
| 1433 |
| 1434 if (!CheckIndex()) { |
| 1435 ReportError(ERR_INIT_FAILED); |
| 1436 return work_item->user_callback().Run(net::ERR_FAILED); |
| 1437 } |
| 1438 AdjustMaxCacheSize(); |
| 1439 |
| 1440 block_files_.Init(result->block_bitmaps); |
| 1441 |
| 1442 // We want to minimize the changes to cache for an AppCache. |
| 1443 if (cache_type() == net::APP_CACHE) { |
| 1444 DCHECK(lru_eviction_); |
| 1445 read_only_ = true; |
| 1446 } else if (cache_type() == net::SHADER_CACHE) { |
| 1447 DCHECK(lru_eviction_); |
| 1448 } |
| 1449 |
| 1450 eviction_.Init(this); |
| 1451 |
| 1452 int64 errors, full_dooms, partial_dooms, last_report; |
| 1453 errors = full_dooms = partial_dooms = last_report = 0; |
| 1454 if (work_item->type() == WorkItem::WORK_RESTART) { |
| 1455 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); |
| 1456 int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE); |
| 1457 int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT); |
| 1458 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); |
| 1459 } |
| 1460 |
| 1461 if (!InitStats(result->stats_data.get())) { |
| 1462 ReportError(ERR_INIT_FAILED); |
| 1463 return work_item->user_callback().Run(net::ERR_FAILED); |
| 1464 } |
| 1465 |
| 1466 disabled_ = false; |
| 1467 |
| 1468 #if defined(STRESS_CACHE_EXTENDED_VALIDATION) |
| 1469 trace_object_->EnableTracing(false); |
| 1470 int sc = SelfCheck(); |
| 1471 if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH) |
| 1472 NOTREACHED(); |
| 1473 trace_object_->EnableTracing(true); |
| 1474 #endif |
| 1475 |
| 1476 if (work_item->type() == WorkItem::WORK_RESTART) { |
| 1477 stats_.SetCounter(Stats::FATAL_ERROR, errors); |
| 1478 stats_.SetCounter(Stats::DOOM_CACHE, full_dooms); |
| 1479 stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms); |
| 1480 stats_.SetCounter(Stats::LAST_REPORT, last_report); |
| 1481 } |
| 1482 |
| 1483 ReportError(rv); |
| 1484 return work_item->user_callback().Run(net::OK); |
| 1485 } |
| 1486 |
| 1487 void BackendImplV3::OnGrowIndexComplete(WorkItem* work_item) { |
| 1488 if (work_item->result() != ERR_NO_ERROR || disabled_ || |
| 1489 (work_item->flags() & WorkItem::WORK_COMPLETE)) { |
| 1490 growing_index_ = false; |
| 1491 return; |
| 1492 } |
| 1493 |
| 1494 scoped_ptr<InitResult> result = work_item->init_result(); |
| 1495 index_.Init(&result.get()->index_data); |
| 1496 work_item->set_flags(WorkItem::WORK_COMPLETE); |
| 1497 PostWorkItem(work_item); |
| 1498 } |
| 1499 |
| 1500 void BackendImplV3::OnGrowFilesComplete(WorkItem* work_item) { |
| 1501 if (work_item->result() != ERR_NO_ERROR || disabled_ || |
| 1502 (work_item->flags() & WorkItem::WORK_COMPLETE)) { |
| 1503 growing_files_ = false; |
| 1504 return; |
| 1505 } |
| 1506 |
| 1507 scoped_ptr<InitResult> result = work_item->init_result(); |
| 1508 block_files_.Init(result->block_bitmaps); |
| 1509 work_item->set_flags(WorkItem::WORK_COMPLETE); |
| 1510 PostWorkItem(work_item); |
| 1511 } |
| 1512 |
| 1513 void BackendImplV3::OnOperationComplete(WorkItem* work_item) { |
| 1514 if (work_item->result() < 0 && work_item->owner_entry()) { |
| 1515 // Make sure that there's a call to Close() after Doom(). |
| 1516 work_item->owner_entry()->AddRef(); |
| 1517 work_item->owner_entry()->Doom(); |
| 1518 work_item->owner_entry()->Close(); |
| 1519 } |
| 1520 |
| 1521 if (!work_item->user_callback().is_null()) |
| 1522 work_item->user_callback().Run(work_item->result()); |
| 1523 } |
| 1524 |
| 1525 |
// Completion of a WORK_OPEN_ENTRY item. Dispatches to the specialized
// handlers first, then either delivers the opened entry or retries with the
// next candidate cell for this hash.
void BackendImplV3::OnOpenEntryComplete(WorkItem* work_item) {
  Trace("Open complete");
  if (work_item->flags() & WorkItem::WORK_FOR_RESURRECT)
    return OnOpenForResurrectComplete(work_item);

  if (work_item->flags() & WorkItem::WORK_FOR_EVICT)
    return OnEvictEntryComplete(work_item);

  if (work_item->flags() & WorkItem::WORK_FOR_ITERATION)
    return OnOpenNextComplete(work_item);

  if (work_item->result() == ERR_NO_ERROR) {
    EntryImplV3* entry;
    int error = NewEntry(work_item, &entry);
    if (!error) {
      if (work_item->flags() & WorkItem::WORK_FOR_DOOM) {
        entry->Doom();
        entry->Close();
      } else {
        eviction_.OnOpenEntry(entry);
        entry_count_++;
        if (work_item->flags() & WorkItem::WORK_FOR_UPDATE) {
          // Fire-and-forget update (see OnExternalCacheHit): no callback.
          UpdateRank(entry, true);
          return;
        }
        *work_item->entry_buffer() = entry;

        Trace("Open hash 0x%x end: 0x%x", entry->GetHash(),
              entry->GetAddress().value());
        stats_.OnEvent(Stats::OPEN_HIT);
        SIMPLE_STATS_COUNTER("disk_cache.hit");
      }

      work_item->user_callback().Run(net::OK);
      return;
    }
  }

  // NOTE(review): the "- 1" below makes the last cell unreachable through the
  // retry path (it differs from OnOpenForResurrectComplete); confirm whether
  // that is intentional.
  if (work_item->entries()->current >= work_item->entries()->cells.size() - 1) {
    // Not found.
    work_item->user_callback().Run(net::ERR_FAILED);
    return;
  }

  // TODO: post a task to delete the stale cell.

  // Open the next entry on the list.
  work_item->entries()->current++;
  if (work_item->entries()->current < work_item->entries()->cells.size())
    PostWorkItem(work_item);
}
| 1577 |
// Completion of an open performed on behalf of a create: if the entry found
// on disk is a previously deleted one, it is "resurrected" into a new entry
// that reuses its short record.
void BackendImplV3::OnOpenForResurrectComplete(WorkItem* work_item) {
  if (work_item->result() == ERR_NO_ERROR) {
    EntryImplV3* deleted_entry;
    int error = NewEntry(work_item, &deleted_entry);
    if (!error) {
      scoped_ptr<ShortEntryRecord> entry_record =
          deleted_entry->GetShortEntryRecord();
      // NOTE(review): this CHECK makes the |!entry_record| branch below
      // unreachable; one of the two should go.
      CHECK(entry_record);
      if (!entry_record) {
        // This is an active entry.
        deleted_entry->Close();
        stats_.OnEvent(Stats::CREATE_MISS);
        Trace("create entry miss ");
        work_item->user_callback().Run(net::ERR_FAILED);
        return;
      }

      // We are attempting to create an entry and found out that the entry was
      // previously deleted.

      stats_.OnEvent(Stats::RESURRECT_HIT);
      Trace("Resurrect entry hit ");
      deleted_entry->Doom();
      deleted_entry->Close();

      // NOTE(review): |deleted_entry| is used after Close(); this relies on
      // the object staying alive past that call -- confirm.
      int rv =
          OnCreateEntryComplete(work_item->key(), deleted_entry->GetHash(),
                                entry_record.get(), work_item->entry_buffer(),
                                work_item->user_callback());
      DCHECK_EQ(rv, net::OK);
      return;
    }
  }

  if (work_item->entries()->current >= work_item->entries()->cells.size()) {
    // Not found.
    work_item->user_callback().Run(net::ERR_FAILED);
    return;
  }

  // TODO: post a task to delete the stale cell.

  // Open the next entry on the list.
  work_item->entries()->current++;
  if (work_item->entries()->current < work_item->entries()->cells.size())
    PostWorkItem(work_item);
}
| 1625 |
// Completion of an eviction read: dooms the evicted entry and notifies the
// eviction policy in every case.
void BackendImplV3::OnEvictEntryComplete(WorkItem* work_item) {
  if (work_item->result() != ERR_NO_ERROR)
    return eviction_.OnEvictEntryComplete();

  // Cell 0 refers to the entry being evicted.
  EntryCell old_cell =
      index_.FindEntryCell(work_item->entries()->cells[0].hash(),
                           work_item->entries()->cells[0].GetAddress());
  DCHECK(old_cell.IsValid());

  if (!(work_item->flags() & WorkItem::WORK_NO_COPY)) {
    // Cell 1 presumably refers to the copied record -- only sanity-checked
    // here; confirm the intended use.
    EntryCell new_cell =
        index_.FindEntryCell(work_item->entries()->cells[1].hash(),
                             work_item->entries()->cells[1].GetAddress());
    DCHECK(new_cell.IsValid());
  }

  EntryImplV3* entry;
  int error = NewEntry(work_item, &entry);
  if (!error) {
    entry->Doom();
    entry->Close();
  }

  // TODO: delete |old_cell| after a timer (so add to deleted entries).

  eviction_.OnEvictEntryComplete();
}
| 1653 |
| 1654 void BackendImplV3::OnOpenNextComplete(WorkItem* work_item) { |
| 1655 Trace("OpenNext complete, work item 0x%p", work_item); |
| 1656 if (work_item->result() != ERR_NO_ERROR) { |
| 1657 OpenNext(work_item); // Ignore result. |
| 1658 return; |
| 1659 } |
| 1660 |
| 1661 EntryImplV3* entry; |
| 1662 int error = NewEntry(work_item, &entry); |
| 1663 if (!error) { |
| 1664 if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) |
| 1665 Doom(entry, work_item); |
| 1666 else |
| 1667 return UpdateIterator(entry, work_item); |
| 1668 } |
| 1669 |
| 1670 // Grab another entry. |
| 1671 OpenNext(work_item); // Ignore result. |
| 1672 } |
| 1673 |
// Finishes a create: allocates the on-disk block and the index cell, builds
// the in-memory entry and hands a referenced pointer back through |entry|.
// |short_record| is non-NULL when resurrecting a previously deleted entry;
// note that |callback| is only run here in that case.
int BackendImplV3::OnCreateEntryComplete(const std::string& key, uint32 hash,
                                         ShortEntryRecord* short_record,
                                         Entry** entry,
                                         const CompletionCallback& callback) {
  // Create a new object in memory and return it to the caller.
  Addr entry_address;
  Trace("Create complete hash 0x%x", hash);
  if (!block_files_.CreateBlock(BLOCK_ENTRIES, 1, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return net::ERR_FAILED;
  }

  EntryCell cell = index_.CreateEntryCell(hash, entry_address);
  if (!cell.IsValid()) {
    // Roll back the block allocation.
    block_files_.DeleteBlock(entry_address);
    return net::ERR_FAILED;
  }

  scoped_refptr<EntryImplV3> cache_entry(
      new EntryImplV3(this, cell.GetAddress(), false));
  IncreaseNumRefs();

  cache_entry->CreateEntry(key, hash, short_record);
  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[cell.GetAddress().value()] = cache_entry;

  IncreaseNumEntries();
  entry_count_++;

  if (short_record)
    eviction_.OnResurrectEntry(cache_entry);
  else
    eviction_.OnCreateEntry(cache_entry);

  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  // Give the caller its own reference through |entry|.
  cache_entry->AddRef();
  *entry = cache_entry.get();

  if (short_record)
    callback.Run(net::OK);

  return net::OK;
}
| 1722 |
| 1723 void BackendImplV3::LogStats() { |
| 1724 StatsItems stats; |
| 1725 GetStats(&stats); |
| 1726 |
| 1727 for (size_t index = 0; index < stats.size(); index++) |
| 1728 VLOG(1) << stats[index].first << ": " << stats[index].second; |
| 1729 } |
| 1730 |
// Periodic UMA reporting for the backend. Counter-based histograms are reset
// after being transmitted so every report covers one interval.
void BackendImplV3::ReportStats() {
  IndexHeaderV3* header = index_.header();
  CACHE_UMA(COUNTS, "Entries", header->num_entries);

  int current_size = header->num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);

  CACHE_UMA(COUNTS_10000, "Size", current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize", max_size);
  if (!max_size)
    max_size++;  // Avoid dividing by zero for sub-megabyte limits.
  CACHE_UMA(PERCENTAGE, "UsedSpace", current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries",
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries",
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors",
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache",
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries",
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  // The timer fires every 30 seconds (see OnInitComplete), so 120 ticks make
  // one hour.
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!(header->flags & CACHE_EVICTED)) {
    CACHE_UMA(HOURS, "TotalTimeNotFull", static_cast<int>(total_hours));
    return;
  }

  // This is an up to date client that will report FirstEviction() data. After
  // that event, start reporting this:

  CACHE_UMA(HOURS, "TotalTime", static_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !header->num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", static_cast<int>(use_hours));

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", static_cast<int>(trim_rate));

  int avg_size = header->num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", header->num_entries);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / header->num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", large_ratio);

  if (!lru_eviction_) {
    // Per-group ratios are only reported for the non-LRU eviction policy.
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio",
              header->num_no_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio",
              header->num_low_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio",
              header->num_high_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio",
              header->num_evicted_entries * 100 / header->num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}
| 1813 |
| 1814 void BackendImplV3::ReportError(int error) { |
| 1815 STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH || |
| 1816 error == ERR_CACHE_CREATED); |
| 1817 |
| 1818 // We transmit positive numbers, instead of direct error codes. |
| 1819 DCHECK_LE(error, 0); |
| 1820 CACHE_UMA(CACHE_ERROR, "Error", error * -1); |
| 1821 } |
| 1822 |
// Performs a sanity check of the index and configures state derived from the
// index header. At the moment only the eviction policy is read; the v2-style
// structural validation is kept below (disabled) as a reference for the v3
// port. Returns false if the index is unusable.
bool BackendImplV3::CheckIndex() {
  // CACHE_EVICTION_2 marks a cache that uses the multi-list eviction
  // algorithm instead of plain LRU. Note that the flag only ever clears
  // |lru_eviction_|; its default value is preserved otherwise.
  if (index_.header()->flags & CACHE_EVICTION_2)
    lru_eviction_ = false;

  /*
  if (!index_.header()->table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(index_.header()->table_len) ||
      index_.header()->table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(index_.header()->table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (index_.header()->num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       index_.header()->num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (index_.header()->num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = index_.header()->table_len - 1;

  // Load the table into memory with a single read.
  scoped_array<char> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
  */

  // TODO: Make sure things look fine, maybe scan the whole thing if not.
  return true;
}
| 1866 |
// Walks every entry reachable from the index and verifies it, returning the
// number of dirty entries found (or a negative net error code on failure).
// The v2 implementation is preserved below (disabled) until the walk is
// ported to the v3 index table; for now the cache is reported fully clean.
int BackendImplV3::CheckAllEntries() {
  /*
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      EntryImplV3* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImplV3> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != index_.header()->num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
        " " << index_.header()->num_entries;
    DCHECK_LT(num_entries, index_.header()->num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
  */
  // No dirty entries detected (verification not yet implemented for v3).
  return 0;
}
| 1912 |
// Verifies that all the block addresses stored by |cache_entry| are valid.
// The v2 implementation is preserved below (disabled) until entry-level
// validation is ported to the v3 format; for now every entry passes.
bool BackendImplV3::CheckEntry(EntryImplV3* cache_entry) {
  /*
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
  */
  // Entry validation not yet implemented for v3; assume the entry is good.
  return true;
}
| 1930 |
| 1931 int BackendImplV3::MaxBuffersSize() { |
| 1932 static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory(); |
| 1933 static bool done = false; |
| 1934 |
| 1935 if (!done) { |
| 1936 const int kMaxBuffersSize = 30 * 1024 * 1024; |
| 1937 |
| 1938 // We want to use up to 2% of the computer's memory. |
| 1939 total_memory = total_memory * 2 / 100; |
| 1940 if (total_memory > kMaxBuffersSize || total_memory <= 0) |
| 1941 total_memory = kMaxBuffersSize; |
| 1942 |
| 1943 done = true; |
| 1944 } |
| 1945 |
| 1946 return static_cast<int>(total_memory); |
| 1947 } |
| 1948 |
| 1949 } // namespace disk_cache |
OLD | NEW |