OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "net/disk_cache/v3/backend_impl_v3.h" |
| 6 |
| 7 #include "base/bind.h" |
| 8 #include "base/bind_helpers.h" |
| 9 #include "base/file_util.h" |
| 10 #include "base/files/file_path.h" |
| 11 #include "base/hash.h" |
| 12 #include "base/message_loop.h" |
| 13 #include "base/metrics/field_trial.h" |
| 14 #include "base/metrics/histogram.h" |
| 15 #include "base/metrics/stats_counters.h" |
| 16 #include "base/rand_util.h" |
| 17 #include "base/string_util.h" |
| 18 #include "base/stringprintf.h" |
| 19 #include "base/sys_info.h" |
| 20 #include "base/threading/thread_restrictions.h" |
| 21 #include "base/threading/worker_pool.h" |
| 22 #include "base/time.h" |
| 23 #include "base/timer.h" |
| 24 #include "net/base/net_errors.h" |
| 25 #include "net/base/io_buffer.h" |
| 26 #include "net/disk_cache/errors.h" |
| 27 #include "net/disk_cache/experiments.h" |
| 28 #include "net/disk_cache/file.h" |
| 29 #include "net/disk_cache/storage_block-inl.h" |
| 30 #include "net/disk_cache/v3/backend_worker.h" |
| 31 #include "net/disk_cache/v3/backend_work_item.h" |
| 32 #include "net/disk_cache/v3/disk_format_v3.h" |
| 33 #include "net/disk_cache/v3/entry_impl_v3.h" |
| 34 #include "net/disk_cache/v3/index_table.h" |
| 35 |
| 36 // This has to be defined before including histogram_macros.h from this file. |
| 37 #define NET_DISK_CACHE_BACKEND_IMPL_CC_ |
| 38 #include "net/disk_cache/histogram_macros.h" |
| 39 |
| 40 using base::Time; |
| 41 using base::TimeDelta; |
| 42 using base::TimeTicks; |
| 43 |
| 44 namespace { |
| 45 |
// Seems like ~240 MB correspond to less than 50k entries for 99% of the people.
// Note that the actual target is to keep the index table load factor under 55%
// for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
// Used when the amount of free disk space cannot be determined.
const int kDefaultCacheSize = 80 * 1024 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;
// Period of the stats timer, in seconds (see OnStatsTimer()).
const int kTimerSeconds = 30;

// Maximum accepted length, in bytes, for an entry key.
const size_t kMaxKeySize = 64 * 1024;
| 58 |
| 59 int DesiredIndexTableLen(int32 storage_size) { |
| 60 if (storage_size <= k64kEntriesStore) |
| 61 return kBaseTableLen; |
| 62 if (storage_size <= k64kEntriesStore * 2) |
| 63 return kBaseTableLen * 2; |
| 64 if (storage_size <= k64kEntriesStore * 4) |
| 65 return kBaseTableLen * 4; |
| 66 if (storage_size <= k64kEntriesStore * 8) |
| 67 return kBaseTableLen * 8; |
| 68 |
| 69 // The biggest storage_size for int32 requires a 4 MB table. |
| 70 return kBaseTableLen * 16; |
| 71 } |
| 72 |
// Inverse of DesiredIndexTableLen(): the largest storage size that a table of
// |table_len| cells is expected to serve (preserving the base ratio of bytes
// per table slot).
int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}
| 76 |
// Returns the on-disk size of the index bitmap for a table of |table_len|
// cells: small tables fit in the base IndexBitmap, bigger tables grow in
// whole 4 KB pages past the base allotment.
size_t GetIndexBitmapSize(int table_len) {
  DCHECK_LT(table_len, 1 << 22);
  size_t base_bits = disk_cache::kBaseBitmapBytes * 8;
  if (table_len < static_cast<int>(base_bits))
    return sizeof(disk_cache::IndexBitmap);

  // One bit per cell; bytes beyond kBaseBitmapBytes are rounded up to 4 KB
  // pages.
  size_t num_pages = (table_len / 8) - disk_cache::kBaseBitmapBytes;
  num_pages = (num_pages + 4095) / 4096;
  return sizeof(disk_cache::IndexHeaderV3) + num_pages * 4096;
}
| 87 |
| 88 } // namespace |
| 89 |
| 90 // ------------------------------------------------------------------------ |
| 91 |
| 92 namespace disk_cache { |
| 93 |
| 94 // Exported by disk_cache/backend_impl.cc |
| 95 // Returns the preferred max cache size given the available disk space. |
| 96 NET_EXPORT_PRIVATE int PreferedCacheSize(int64 available); |
| 97 |
// The constructor only wires default member values; all real work happens in
// Init().
BackendImplV3::BackendImplV3(const base::FilePath& path,
                             base::MessageLoopProxy* cache_thread,
                             net::NetLog* net_log)
    : index_(this),
      path_(path),
      block_files_(this),
      max_size_(0),
      up_ticks_(0),
      test_seconds_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      read_only_(false),
      disabled_(false),
      lru_eviction_(true),
      first_timer_(true),
      user_load_(false),
      growing_index_(false),
      growing_files_(false),
      net_log_(net_log),
      cache_thread_(cache_thread),
      ptr_factory_(this) {
}
| 123 |
BackendImplV3::~BackendImplV3() {
  // Flush and release everything still owned by this backend.
  CleanupCache();
}
| 127 |
// Starts asynchronous initialization on the worker; |callback| runs when the
// WORK_INIT item completes. Calling Init() twice is a programming error.
int BackendImplV3::Init(const CompletionCallback& callback) {
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  worker_ = new Worker(path_, base::MessageLoopProxy::current());
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_INIT);
  work_item->set_user_callback(callback);
  work_item->set_flags(user_flags_);
  PostWorkItem(work_item);

  return net::ERR_IO_PENDING;
}
| 141 |
| 142 // ------------------------------------------------------------------------ |
| 143 |
// Backward direction of the shared enumeration helper (see OpenNextEntry()).
int BackendImplV3::OpenPrevEntry(void** iter, Entry** prev_entry,
                                 const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  return OpenFollowingEntry(false, iter, prev_entry, callback);
}
| 149 |
| 150 bool BackendImplV3::SetMaxSize(int max_bytes) { |
| 151 COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model); |
| 152 if (max_bytes < 0) |
| 153 return false; |
| 154 |
| 155 // Zero size means use the default. |
| 156 if (!max_bytes) |
| 157 return true; |
| 158 |
| 159 // Avoid a DCHECK later on. |
| 160 if (max_bytes >= kint32max - kint32max / 10) |
| 161 max_bytes = kint32max - kint32max / 10 - 1; |
| 162 |
| 163 user_flags_ |= MAX_SIZE; |
| 164 max_size_ = max_bytes; |
| 165 return true; |
| 166 } |
| 167 |
void BackendImplV3::SetType(net::CacheType type) {
  // The memory-only cache is a different backend entirely.
  DCHECK_NE(net::MEMORY_CACHE, type);
  cache_type_ = type;
}

// Reserves |block_count| blocks of |block_type| and returns their address.
bool BackendImplV3::CreateBlock(FileType block_type, int block_count,
                                Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

// Refreshes the entry's timestamp used by eviction. Plain reads don't reorder
// entries for the shader cache or a read-only backend.
void BackendImplV3::UpdateRank(EntryImplV3* entry, bool modified) {
  if (!modified && (cache_type() == net::SHADER_CACHE || read_only_))
    return;

  index_.UpdateTime(entry->GetHash(), entry->GetAddress());
}
| 184 |
// Marks an open entry as deleted in the index and detaches it from the map of
// open entries; the entry object itself goes away with its last reference.
void BackendImplV3::InternalDoomEntry(EntryImplV3* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->GetAddress();

  Trace("Doom entry 0x%p", entry);

  // Note: SetSate is the spelling of the IndexTable API.
  bool rv = index_.SetSate(hash, entry_addr, ENTRY_DELETED);
  DCHECK(rv);

  // TODO: add to the list of entries to delete, and grab an extra ref to the
  // entry so that close (and delete) happens later.

  // And remove from the list of open entries.
  EntriesMap::iterator it = open_entries_.find(entry_addr.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);

  entry->InternalDoom();
  DecreaseNumEntries();
}
| 206 |
// Called when an open entry starts going away: flip its index cell back to
// ENTRY_USED and forget it from the open-entries map.
void BackendImplV3::OnEntryDestroyBegin(Addr address) {
  if (disabled_)
    return;
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    bool rv = index_.SetSate(it->second->GetHash(), address, ENTRY_USED);
    DCHECK(rv);
    open_entries_.erase(it);
  }
}

// Called after an entry is fully destroyed; a good moment to trim the cache
// if it grew past its budget (after the initial grace period).
void BackendImplV3::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (disabled_)
    return;
  if (index_.header()->num_bytes > max_size_ && !read_only_ &&
      (up_ticks_ > kTrimDelay || user_flags_ & BASIC_UNIT_TEST)) {
    eviction_.TrimCache();
  }
}
| 227 |
// Posts an asynchronous read of |buffer_len| bytes at |address| + |offset|.
// |entry| may be NULL for reads not tied to a live entry.
void BackendImplV3::ReadData(EntryImplV3* entry, Addr address, int offset,
                             net::IOBuffer* buffer, int buffer_len,
                             const CompletionCallback& callback) {
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_READ_DATA);
  work_item->set_buffer(buffer);
  work_item->set_buffer_len(buffer_len);
  work_item->set_address(address);
  work_item->set_offset(offset);
  work_item->set_user_callback(callback);
  if (entry)
    work_item->set_owner_entry(entry);

  PostWorkItem(work_item);
}

// Posts an asynchronous write. Zero-length writes are dropped and must not
// expect a completion callback.
void BackendImplV3::WriteData(EntryImplV3* entry, Addr address, int offset,
                              net::IOBuffer* buffer, int buffer_len,
                              const CompletionCallback& callback) {
  if (!buffer_len) {
    DCHECK(callback.is_null());
    return;
  }
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_WRITE_DATA);
  work_item->set_buffer(buffer);
  work_item->set_buffer_len(buffer_len);
  work_item->set_address(address);
  work_item->set_offset(offset);
  work_item->set_user_callback(callback);
  work_item->set_owner_entry(entry);
  PostWorkItem(work_item);
}
| 259 |
// Posts a copy of |len| bytes from |source| to |destination|.
// TODO: the work item should also delete |source| once the copy is done.
void BackendImplV3::MoveData(EntryImplV3* entry, Addr source,
                             Addr destination, int len,
                             const CompletionCallback& callback) {
  DCHECK(len);
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_MOVE_DATA);
  work_item->set_buffer_len(len);
  work_item->set_address(source);
  work_item->set_address2(destination);
  work_item->set_user_callback(callback);
  work_item->set_owner_entry(entry);
  PostWorkItem(work_item);
}

// Posts a truncation of the data at |address| down to |offset| bytes.
void BackendImplV3::Truncate(EntryImplV3* entry, Addr address, int offset) {
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_TRUNCATE);
  work_item->set_address(address);
  work_item->set_offset(offset);
  work_item->set_owner_entry(entry);
  PostWorkItem(work_item);
}
| 280 |
// Releases the storage at |address|. Data in a separate file is deleted
// through the worker; block-file records are zeroed on disk and the blocks
// returned to the allocator.
void BackendImplV3::Delete(EntryImplV3* entry, Addr address) {
  if (disabled_)
    return;
  if (address.is_separate_file()) {
    scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_DELETE);
    work_item->set_address(address);
    work_item->set_owner_entry(entry);
    PostWorkItem(work_item);

    // And now delete the block itself.
    address = address.AsBlockFile();
  }

  int size = Addr::BlockSizeForFileType(address.file_type());
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
  memset(buffer->data(), 0, size);
  WriteData(entry, address, 0, buffer, size, net::CompletionCallback());

  // Deleting the entry's own record frees its index cell as well.
  if (address == entry->GetAddress()) {
    bool rv = index_.SetSate(entry->GetHash(), address, ENTRY_FREE);
    DCHECK(rv);
  }

  // TODO: to be moved to a backup cycle; has to be done at the end of the
  // task.
  block_files_.DeleteBlock(address);
}
| 306 |
// Posts the release of worker-side resources attached to |address|.
void BackendImplV3::Close(EntryImplV3* entry, Addr address) {
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLOSE);
  work_item->set_address(address);
  work_item->set_owner_entry(entry);
  PostWorkItem(work_item);
}
| 313 |
// Index callback; not implemented yet.
void BackendImplV3::DeleteCell(IndexCell* cell, int32 cell_id) {
  NOTREACHED();
  // TODO: post a task to delete this cell, and keep a local map of cells
  // being deleted.
}

// Index callback; not implemented yet.
void BackendImplV3::FixCell(IndexCell* cell, int32 cell_id) {
  NOTREACHED();
}
| 323 |
// Starts eviction of the entry at |address|. Returns false when the entry is
// currently open, its cell is not in a plain ENTRY_USED state, or the
// required allocations fail.
bool BackendImplV3::EvictEntry(uint32 hash, Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    return false;

  EntryCell old_cell = index_.FindEntryCell(hash, address);
  if (!old_cell.IsValid() || old_cell.state() != ENTRY_USED)
    return false;

  EntrySet entries;
  entries.cells.push_back(old_cell);

  uint32 flags = WorkItem::WORK_FOR_EVICT;
  if (lru_eviction_) {
    // LRU mode just drops the entry; nothing is copied.
    flags |= WorkItem::WORK_NO_COPY;
  } else {
    // V2 eviction keeps a record in a BLOCK_EVICTED cell, allocated up front
    // so failure can be reported synchronously.
    Addr new_address;
    if (!block_files_.CreateBlock(BLOCK_EVICTED, 1, &new_address))
      return false;

    EntryCell new_cell = index_.CreateEntryCell(hash, new_address);
    if (!new_cell.IsValid()) {
      block_files_.DeleteBlock(new_address);
      return false;
    }
    entries.cells.push_back(new_cell);
  }

  // We have to go through the index to keep the bitmaps consistent.
  //index_.SetSate(hash, address, ENTRY_DELETED);

  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
  work_item->set_flags(flags);
  work_item->set_entries(entries);
  PostWorkItem(work_item);

  return true;
}
| 362 |
// Returns a new reference to the in-memory entry at |address|, or NULL if
// that entry is not currently open.
EntryImplV3* BackendImplV3::GetOpenEntry(Addr address) const {
  EntriesMap::const_iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    it->second->AddRef();
    it->second->OnOpenEntry();
    return it->second;
  }

  return NULL;
}
| 374 |
// An individual value is limited to one eighth of the total cache size.
int BackendImplV3::MaxFileSize() const {
  return max_size_ / 8;
}

// Adjusts the total byte count by the delta between both sizes.
void BackendImplV3::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

// Records an allocation request that exceeded the allowed limits.
void BackendImplV3::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}
| 394 |
// Grants (or denies) growing an in-memory user buffer from |current_size| to
// |new_size|. |force| bypasses the global buffering budget.
bool BackendImplV3::IsAllocAllowed(int current_size, int new_size, bool force) {
  DCHECK_GT(new_size, current_size);
  if (!force && (user_flags_ & NO_BUFFERING))
    return false;

  int to_add = new_size - current_size;
  if (!force && (buffer_bytes_ + to_add > MaxBuffersSize()))
    return false;

  buffer_bytes_ += to_add;
  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
  return true;
}

// Returns |size| bytes to the buffering budget.
void BackendImplV3::BufferDeleted(int size) {
  DCHECK_GE(size, 0);
  buffer_bytes_ -= size;
  DCHECK_GE(buffer_bytes_, 0);
}
| 414 |
| 415 bool BackendImplV3::IsLoaded() const { |
| 416 if (user_flags_ & NO_LOAD_PROTECTION) |
| 417 return false; |
| 418 |
| 419 return user_load_; |
| 420 } |
| 421 |
| 422 base::Time BackendImplV3::GetCurrentTime() const { |
| 423 Time base_time = Time::Now(); |
| 424 if (!test_seconds_) |
| 425 return base_time; |
| 426 |
| 427 return base_time + TimeDelta::FromSeconds(test_seconds_); |
| 428 } |
| 429 |
| 430 std::string BackendImplV3::HistogramName(const char* name, |
| 431 int experiment) const { |
| 432 static const char* names[] = { "Http", "", "Media", "AppCache", "Shader" }; |
| 433 DCHECK_NE(cache_type_, net::MEMORY_CACHE); |
| 434 if (!experiment) |
| 435 return base::StringPrintf("DiskCache3.%s.%s", names[cache_type_], name); |
| 436 return base::StringPrintf("DiskCache3.%s.%s_%d", names[cache_type_], |
| 437 name, experiment); |
| 438 } |
| 439 |
base::WeakPtr<BackendImplV3> BackendImplV3::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImplV3::ShouldReportAgain() {
  // The decision is made once per backend instance: uma_report_ settles at 1
  // (don't report) or 2 (report) after the first call.
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (GetCurrentTime() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, GetCurrentTime().ToInternalValue());
    uma_report_++;
    return true;
  }
  return false;
}
| 460 |
// Records one-time histograms the first time the cache fills up and has to
// evict something.
void BackendImplV3::FirstEviction() {
  IndexHeaderV3* header = index_.header();
  header->flags |= CACHE_EVICTED;
  DCHECK(header->create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.

  Time create_time = Time::FromInternalValue(header->create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  // TIMER counts 30-second ticks, so 120 ticks per hour.
  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
            static_cast<int>(header->num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
            static_cast<int>((header->num_bytes / 1024) / use_time));

  // NOTE(review): this assumes num_bytes != 0 at fill-up time; otherwise the
  // ratios below divide by zero — confirm the invariant.
  int avg_size = header->num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / header->num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  // Extra breakdown only meaningful for the V2 (non-LRU) eviction groups.
  if (!lru_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              header->num_no_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              header->num_low_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              header->num_high_use_entries * 100 / header->num_entries);
  }

  stats_.ResetRatios();
}
| 501 |
void BackendImplV3::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

// Accumulates I/O volume for the stats timer; saturates at kint32max instead
// of wrapping around.
void BackendImplV3::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}

void BackendImplV3::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}
| 517 |
// Posts a request to enlarge the index table; growing_index_ ensures only one
// such request is in flight.
void BackendImplV3::GrowIndex() {
  if (growing_index_ || disabled_)
    return;
  growing_index_ = true;
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_GROW_INDEX);
  PostWorkItem(work_item);
}

// Posts a request to enlarge the block files; growing_files_ ensures only one
// such request is in flight.
void BackendImplV3::GrowBlockFiles() {
  if (growing_files_ || disabled_)
    return;
  growing_files_ = true;
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_GROW_FILES);
  PostWorkItem(work_item);
}
| 533 |
// Runs every kTimerSeconds: updates sampled statistics, measures user load,
// and periodically reports and persists the stats.
void BackendImplV3::OnStatsTimer() {
  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);

  CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
  CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  // Only report stats from the first tick, and only for a healthy cache.
  if (disabled_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  // Save stats to disk at 5 min intervals (10 ticks of kTimerSeconds).
  if (time % 10 == 0)
    StoreStats();
}
| 573 |
void BackendImplV3::SetUnitTestMode() {
  user_flags_ |= UNIT_TEST_MODE;
}

// Upgrade mode reads an old cache without modifying it.
void BackendImplV3::SetUpgradeMode() {
  user_flags_ |= UPGRADE_MODE;
  read_only_ = true;
}

// Switches from LRU to the V2 eviction algorithm.
void BackendImplV3::SetNewEviction() {
  user_flags_ |= EVICTION_V2;
  lru_eviction_ = false;
}

void BackendImplV3::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}
| 591 |
// Posts a no-op work item so |callback| runs after everything queued so far.
int BackendImplV3::FlushQueueForTest(const CompletionCallback& callback) {
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_NONE);
  work_item->set_user_callback(callback);
  PostWorkItem(work_item);
  return net::ERR_IO_PENDING;
}

// Tears down the backend from a test: the worker cleans up asynchronously
// while this object immediately becomes unusable.
int BackendImplV3::CleanupForTest(const CompletionCallback& callback) {
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLEANUP);
  work_item->set_user_callback(callback);
  PostWorkItem(work_item);
  worker_ = NULL;
  init_ = false;
  disabled_ = true;
  return net::ERR_IO_PENDING;
}
| 608 |
// Triggers eviction directly; |empty| trims the whole cache.
void BackendImplV3::TrimForTest(bool empty) {
  eviction_.SetTestMode();
  if (empty)
    eviction_.TrimAllCache(CompletionCallback());
  else
    eviction_.TrimCache();
}

// Triggers trimming of the deleted-entries list; |empty| clears it fully.
void BackendImplV3::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}
| 621 |
| 622 void BackendImplV3::AddDelayForTest(int seconds) { |
| 623 Trace("Add %d deconds", seconds); |
| 624 int old_timers = test_seconds_ / kTimerSeconds; |
| 625 test_seconds_ += seconds; |
| 626 if (old_timers != test_seconds_ / kTimerSeconds) |
| 627 OnStatsTimer(); |
| 628 } |
| 629 |
// If the entry for |key| is currently open, drops one reference and runs
// |callback| when the entry is finally destroyed. Returns OK when there is
// nothing to wait for.
int BackendImplV3::WaitForEntryToCloseForTest(
    const std::string& key,
    const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (disabled_ || key.empty())
    return net::ERR_FAILED;

  uint32 hash = base::Hash(key);

  EntrySet entries = index_.LookupEntry(hash);
  if (!entries.cells.size())
    return net::OK;

  // Only evicted traces remain, so nothing can be open.
  if (entries.cells.size() == static_cast<size_t>(entries.evicted_count))
    return net::OK;

  EntryImplV3* open_entry = LookupOpenEntry(entries, key);
  if (open_entry) {
    open_entry->NotifyDestructionForTest(callback);
    open_entry->Close();
    return net::ERR_IO_PENDING;
  }

  return net::OK;
}
| 655 |
// Performs a basic consistency check of the cache; returns a disk_cache error
// code or the result of checking every entry.
int BackendImplV3::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  // TODO: port the rankings verification from the v2 backend:
  /*int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != index_.header()->num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }*/

  return CheckAllEntries();
}
| 679 |
| 680 // ------------------------------------------------------------------------ |
| 681 |
net::CacheType BackendImplV3::GetCacheType() const {
  return cache_type_;
}

// A disabled backend reports an empty cache.
int32 BackendImplV3::GetEntryCount() const {
  if (disabled_)
    return 0;
  DCHECK(init_);
  return index_.header()->num_entries;
}
| 692 |
// Opens the entry for |key|: synchronously if it is already in memory,
// otherwise through a worker read (ERR_IO_PENDING).
int BackendImplV3::OpenEntry(const std::string& key, Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (disabled_ || key.empty())
    return net::ERR_FAILED;

  uint32 hash = base::Hash(key);
  Trace("Open hash 0x%x", hash);

  EntrySet entries = index_.LookupEntry(hash);
  if (!entries.cells.size())
    return net::ERR_FAILED;

  // All cells are evicted leftovers, so there is no live entry to open.
  if (entries.cells.size() == static_cast<size_t>(entries.evicted_count))
    return net::ERR_FAILED;

  EntryImplV3* open_entry = LookupOpenEntry(entries, key);
  if (open_entry) {
    *entry = open_entry;
    eviction_.OnOpenEntry(open_entry);
    entry_count_++;

    Trace("Open hash 0x%x end: 0x%x", hash, open_entry->GetAddress().value());
    stats_.OnEvent(Stats::OPEN_HIT);
    SIMPLE_STATS_COUNTER("disk_cache.hit");
    return net::OK;
  }

  // Read the entry from disk.
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
  work_item->set_entries(entries);
  work_item->set_user_callback(callback);
  work_item->set_key(key);
  work_item->set_entry_buffer(entry);
  PostWorkItem(work_item);

  return net::ERR_IO_PENDING;
}
| 731 |
// Creates a new entry for |key|. Fails if the index already holds live cells
// for this hash; deleted cells may instead be resurrected via the worker.
int BackendImplV3::CreateEntry(const std::string& key, Entry** entry,
                               const CompletionCallback& callback) {
  DCHECK(init_);
  DCHECK(!callback.is_null());
  if (disabled_ || key.empty() || key.size() > kMaxKeySize)
    return net::ERR_FAILED;

  uint32 hash = base::Hash(key);
  Trace("Create hash 0x%x", hash);

  EntrySet entries = index_.LookupEntry(hash);
  if (entries.cells.size()) {
    if (entries.cells.size() != static_cast<size_t>(entries.evicted_count)) {
      // There is at least one live cell for this hash, but it could be a hash
      // collision rather than this key. TODO: create a work item to check
      // that here (keep a collision-specific map) instead of failing.
      return net::ERR_FAILED;
    }

    // On the other hand, we have only deleted items that we may resurrect.
    // Read the entry from disk.
    scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
    work_item->set_flags(WorkItem::WORK_FOR_RESURRECT);
    work_item->set_entries(entries);
    work_item->set_user_callback(callback);
    work_item->set_key(key);
    work_item->set_entry_buffer(entry);
    PostWorkItem(work_item);

    return net::ERR_IO_PENDING;
  }
  return OnCreateEntryComplete(key, hash, NULL, entry, callback);
}
| 764 |
// Dooms the entry for |key|: immediately when it is open, otherwise through a
// worker round trip that first reads the entry from disk.
int BackendImplV3::DoomEntry(const std::string& key,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (disabled_ || key.empty())
    return net::ERR_FAILED;

  uint32 hash = base::Hash(key);
  Trace("DoomEntry hash 0x%x", hash);

  EntrySet entries = index_.LookupEntry(hash);
  if (!entries.cells.size())
    return net::ERR_FAILED;

  // Only evicted leftovers remain; nothing live to doom.
  if (entries.cells.size() == static_cast<size_t>(entries.evicted_count))
    return net::ERR_FAILED;

  EntryImplV3* open_entry = LookupOpenEntry(entries, key);
  if (open_entry) {
    open_entry->Doom();
    open_entry->Close();
    return net::OK;
  }

  // Read the entry from disk.
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
  work_item->set_flags(WorkItem::WORK_FOR_DOOM);
  work_item->set_entries(entries);
  work_item->set_user_callback(callback);
  work_item->set_key(key);
  PostWorkItem(work_item);

  return net::ERR_IO_PENDING;
}
| 798 |
// Empties the cache. With no open entries we can simply restart the backend;
// otherwise everything is trimmed away through the eviction path.
int BackendImplV3::DoomAllEntries(const CompletionCallback& callback) {
  if (disabled_)
    return net::ERR_FAILED;

  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(callback);
    return init_ ? net::OK : net::ERR_IO_PENDING;
  }
  return eviction_.TrimAllCache(callback);
}
| 812 |
// Dooms every entry whose timestamp falls within [initial_time, end_time],
// clamping |end_time| to "now", by iterating the index with a worker-driven
// open/doom loop.
int BackendImplV3::DoomEntriesBetween(base::Time initial_time,
                                      base::Time end_time,
                                      const CompletionCallback& callback) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  Time now = GetCurrentTime();
  if (end_time.is_null() || end_time > now)
    end_time = now;

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  scoped_ptr<IndexIterator> to_delete(new IndexIterator);
  to_delete->forward = true;
  // +1 presumably so cells stamped exactly at |end_time| are included —
  // verify against IndexTable::GetTimestamp() rounding.
  to_delete->timestamp = index_.GetTimestamp(end_time) + 1;

  // Prepare to read the first entry from disk.
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
  work_item->set_flags(WorkItem::WORK_FOR_ITERATION |
                       WorkItem::WORK_FOR_DOOM_RANGE);
  work_item->set_initial_time(initial_time);
  work_item->set_end_time(end_time);
  work_item->set_iterator(to_delete.Pass());

  // No entry in range means the operation is already complete.
  if (!OpenNext(work_item))
    return net::OK;

  work_item->set_user_callback(callback);
  return net::ERR_IO_PENDING;
}
| 844 |
// Dooms everything from |initial_time| up to "now".
int BackendImplV3::DoomEntriesSince(base::Time initial_time,
                                    const CompletionCallback& callback) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  return DoomEntriesBetween(initial_time, GetCurrentTime(), callback);
}

// Forward direction of the shared enumeration helper (see OpenPrevEntry()).
int BackendImplV3::OpenNextEntry(void** iter, Entry** next_entry,
                                 const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  return OpenFollowingEntry(true, iter, next_entry, callback);
}
| 856 |
| 857 void BackendImplV3::EndEnumeration(void** iter) { |
| 858 scoped_ptr<IndexIterator> iterator( |
| 859 reinterpret_cast<IndexIterator*>(*iter)); |
| 860 *iter = NULL; |
| 861 } |
| 862 |
| 863 void BackendImplV3::GetStats(StatsItems* stats) { |
| 864 if (disabled_) |
| 865 return; |
| 866 |
| 867 std::pair<std::string, std::string> item; |
| 868 |
| 869 item.first = "Entries"; |
| 870 item.second = base::StringPrintf("%d", index_.header()->num_entries); |
| 871 stats->push_back(item); |
| 872 |
| 873 item.first = "Max size"; |
| 874 item.second = base::StringPrintf("%d", max_size_); |
| 875 stats->push_back(item); |
| 876 |
| 877 item.first = "Current size"; |
| 878 item.second = base::StringPrintf("%d", index_.header()->num_bytes); |
| 879 stats->push_back(item); |
| 880 |
| 881 stats_.GetItems(stats); |
| 882 } |
| 883 |
// Another cache layer used this resource: refresh the timestamps of the live
// cells for |key| so eviction treats it as recently used.
void BackendImplV3::OnExternalCacheHit(const std::string& key) {
  if (disabled_ || key.empty())
    return;

  uint32 hash = base::Hash(key);
  EntrySet entries = index_.LookupEntry(hash);
  if (!entries.cells.size())
    return;

  // Only evicted leftovers remain; nothing to refresh.
  if (entries.cells.size() == static_cast<size_t>(entries.evicted_count))
    return;

  for (size_t i = 0; i < entries.cells.size(); i++) {
    if (entries.cells[i].group() == ENTRY_EVICTED)
      continue;

    index_.UpdateTime(hash, entries.cells[i].GetAddress());
  }

  EntryImplV3* open_entry = LookupOpenEntry(entries, key);
  if (open_entry) {
    eviction_.OnOpenEntry(open_entry);
    entry_count_++;
    UpdateRank(open_entry, true);
    open_entry->Close();
    return;
  }

  if (user_flags_ & UNIT_TEST_MODE) {
    for (size_t i = 0; i < entries.cells.size(); i++) {
      // This method doesn't have a callback, and it may take a while for the
      // operation to complete so update the time of any entry with this hash.
      if (entries.cells[i].group() != ENTRY_EVICTED)
        index_.UpdateTime(hash, entries.cells[i].GetAddress());
    }
  }

  // Read the entry from disk.
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
  work_item->set_flags(WorkItem::WORK_FOR_UPDATE);
  work_item->set_entries(entries);
  work_item->set_key(key);
  PostWorkItem(work_item);
}
| 928 |
| 929 // ------------------------------------------------------------------------ |
| 930 |
| 931 // The maximum cache size will be either set explicitly by the caller, or |
| 932 // calculated by this code. |
| 933 void BackendImplV3::AdjustMaxCacheSize() { |
| 934 if (max_size_) |
| 935 return; |
| 936 |
| 937 // The user is not setting the size, let's figure it out. |
| 938 int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_); |
| 939 if (available < 0) { |
| 940 max_size_ = kDefaultCacheSize; |
| 941 return; |
| 942 } |
| 943 |
| 944 available += index_.header()->num_bytes; |
| 945 |
| 946 max_size_ = PreferedCacheSize(available); |
| 947 |
| 948 // Let's not use more than the default size while we tune-up the performance |
| 949 // of bigger caches. TODO(rvargas): remove this limit. |
| 950 if (max_size_ > kDefaultCacheSize * 4) |
| 951 max_size_ = kDefaultCacheSize * 4; |
| 952 } |
| 953 |
bool BackendImplV3::InitStats(void* stats_data) {
  // Initializes |stats_| either from scratch (when the index has no stats
  // address yet) or from the serialized record loaded by the worker.
  // Returns false on failure.
  Addr address(index_.header()->stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    // First run: allocate block-file storage for the stats record.
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;
    // No previous data to restore (NULL buffer, zero length).
    return stats_.Init(NULL, 0, address);
  }

  // Load the required data.
  DCHECK(address.is_block_file());
  size = address.num_blocks() * address.BlockSize();

  if (!stats_.Init(stats_data, size, address))
    return false;
  // Only the main disk cache reports the size histogram, and only for the
  // subset of users selected by ShouldReportAgain().
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}
| 978 |
| 979 void BackendImplV3::StoreStats() { |
| 980 int size = stats_.StorageSize(); |
| 981 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size)); |
| 982 Addr address; |
| 983 size = stats_.SerializeStats(buffer->data(), size, &address); |
| 984 DCHECK(size); |
| 985 if (!address.is_initialized()) |
| 986 return; |
| 987 |
| 988 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_WRITE_DATA); |
| 989 work_item->set_buffer(buffer); |
| 990 work_item->set_buffer_len(size); |
| 991 work_item->set_address(address); |
| 992 work_item->set_offset(0); |
| 993 PostWorkItem(work_item); |
| 994 } |
| 995 |
| 996 void BackendImplV3::RestartCache(const CompletionCallback& callback) { |
| 997 PrepareForRestart(); |
| 998 |
| 999 // Don't call Init() if directed by the unit test: we are simulating a failure |
| 1000 // trying to re-enable the cache. |
| 1001 if (user_flags_ & UNIT_TEST_MODE) { |
| 1002 init_ = true; // Let the destructor do proper cleanup. |
| 1003 } else { |
| 1004 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_RESTART); |
| 1005 work_item->set_user_callback(callback); |
| 1006 work_item->set_flags(user_flags_); |
| 1007 PostWorkItem(work_item); |
| 1008 } |
| 1009 } |
| 1010 |
| 1011 void BackendImplV3::PrepareForRestart() { |
| 1012 if (!(user_flags_ & EVICTION_V2)) |
| 1013 lru_eviction_ = true; |
| 1014 |
| 1015 disabled_ = true; |
| 1016 index_.header()->crash = 0; |
| 1017 block_files_.Clear(); |
| 1018 index_.Reset(); |
| 1019 init_ = false; |
| 1020 restarted_ = true; |
| 1021 } |
| 1022 |
| 1023 void BackendImplV3::CleanupCache() { |
| 1024 Trace("Backend Cleanup"); |
| 1025 //eviction_.Stop(); |
| 1026 timer_.reset(); |
| 1027 |
| 1028 if (init_) { |
| 1029 StoreStats(); |
| 1030 scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLEANUP); |
| 1031 PostWorkItem(work_item); |
| 1032 worker_ = NULL; |
| 1033 } |
| 1034 ptr_factory_.InvalidateWeakPtrs(); |
| 1035 } |
| 1036 |
int BackendImplV3::NewEntry(WorkItem* work_item, EntryImplV3** entry) {
  // Materializes an in-memory EntryImplV3 for the cell currently selected by
  // |work_item| (entries()->current), using the record data the worker read
  // from disk. On success returns 0 and stores a referenced entry in |*entry|;
  // otherwise returns an ERR_* code.
  Addr address =
      work_item->entries()->cells[work_item->entries()->current].GetAddress();

  // The entry could have been opened since this task was posted to the cache
  // thread, so let's check again.
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    // Easy job. This entry is already in memory.
    EntryImplV3* this_entry = it->second;
    this_entry->AddRef();
    this_entry->OnOpenEntry();
    *entry = this_entry;
    return 0;
  }

  // Even if the entry is not in memory right now, it could have changed. Note
  // that any state other than USED means we are either deleting this entry or
  // it should be in memory.
  uint32 hash =
      work_item->entries()->cells[work_item->entries()->current].hash();
  EntryCell cell = index_.FindEntryCell(hash, address);
  if (!cell.IsValid() || cell.state() != ENTRY_USED)
    return ERR_INVALID_ENTRY;

  STRESS_DCHECK(block_files_.IsValid(address));

  if (!address.SanityCheckForEntryV3()) {
    LOG(WARNING) << "Wrong entry address.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ADDRESS;
  }

  // Evicted entries only carry the short record; regular entries come with
  // the full record the worker just read.
  scoped_refptr<EntryImplV3> cache_entry;
  if (address.file_type() == BLOCK_EVICTED) {
    cache_entry = new EntryImplV3(this, address, work_item->key(),
                                  work_item->short_entry_record().Pass());
  } else {
    cache_entry = new EntryImplV3(this, address, work_item->key(),
                                  work_item->entry_record().Pass());
  }
  IncreaseNumRefs();
  *entry = NULL;

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ENTRY;
  }

  STRESS_DCHECK(block_files_.IsValid(
      Addr(cache_entry->entry()->Data()->rankings_node)));

  if (!cache_entry->DataSanityCheck()) {
    // TODO(review): should this share a single failure path with the check
    // above? Make sure the cell is deleted in the first case, and as much
    // data as possible is deleted here.
    LOG(WARNING) << "Messed up entry found.";
    cache_entry->FixForDelete();
  }

  // Track the entry as open from now on.
  open_entries_[address.value()] = cache_entry;

  cache_entry->BeginLogging(net_log_, false);
  cache_entry->OnOpenEntry();
  cache_entry.swap(entry);
  return 0;
}
| 1103 |
| 1104 EntryImplV3* BackendImplV3::LookupOpenEntry(const EntrySet& entries, |
| 1105 const std::string key) { |
| 1106 for (size_t i = 0; i < entries.cells.size(); i++) { |
| 1107 if (entries.cells[i].group() == ENTRY_EVICTED) |
| 1108 continue; |
| 1109 |
| 1110 EntryImplV3* this_entry = GetOpenEntry(entries.cells[i].GetAddress()); |
| 1111 if (this_entry && this_entry->GetKey() == key) |
| 1112 return this_entry; |
| 1113 } |
| 1114 return NULL; |
| 1115 } |
| 1116 |
// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
int BackendImplV3::OpenFollowingEntry(bool forward, void** iter,
                                      Entry** next_entry,
                                      const CompletionCallback& callback) {
  if (disabled_)
    return net::ERR_FAILED;

  DCHECK(iter);

  // Take ownership of the iterator stored in the caller's opaque handle; it
  // is NULL on the first call of an enumeration.
  scoped_ptr<IndexIterator> iterator(
      reinterpret_cast<IndexIterator*>(*iter));
  *iter = NULL;

  if (!iterator.get()) {
    // Fresh enumeration: start just past the current timestamp so every entry
    // is visible to the iteration.
    iterator.reset(new IndexIterator);
    iterator->timestamp = index_.GetTimestamp(GetCurrentTime()) + 1;
    iterator->forward = forward;
  }

  // Prepare to read the first entry from disk.
  scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
  work_item->set_flags(WorkItem::WORK_FOR_ITERATION);
  work_item->set_iterator(iterator.Pass());
  work_item->set_iter_buffer(iter);
  work_item->set_entry_buffer(next_entry);

  // NOTE(review): the callback is attached only after OpenNext() succeeds, so
  // the synchronous-failure path inside OpenNext() sees a null callback and
  // does not run it; the caller gets ERR_FAILED directly instead.
  if (!OpenNext(work_item))
    return net::ERR_FAILED;

  work_item->set_user_callback(callback);
  return net::ERR_IO_PENDING;
}
| 1149 |
| 1150 bool BackendImplV3::GetMoreCells(WorkItem* work_item) { |
| 1151 DCHECK(work_item->flags() & WorkItem::WORK_FOR_ITERATION); |
| 1152 IndexIterator* iterator = work_item->iterator(); |
| 1153 |
| 1154 if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) { |
| 1155 int lower_limit = index_.GetTimestamp(work_item->initial_time()); |
| 1156 if (iterator->timestamp <= lower_limit || |
| 1157 !index_.GetNextCells(iterator)) { |
| 1158 return false; |
| 1159 } |
| 1160 return true; |
| 1161 } |
| 1162 |
| 1163 return index_.GetNextCells(iterator); |
| 1164 } |
| 1165 |
bool BackendImplV3::OpenNext(WorkItem* work_item) {
  // Picks the next valid cell from the iteration and posts |work_item| to the
  // worker to read that entry from disk. Returns false (after running the user
  // callback, if one is set) when the iteration is over.
  Trace("OpenNext work item 0x%p", work_item);
  CellList* cells = &work_item->iterator()->cells;
  EntrySet entries;
  for (;;) {
    if (cells->empty()) {
      // Refill the cell list from the index.
      if (!GetMoreCells(work_item)) {
        // Iteration finished: running out of cells means success for a
        // doom-range operation, but failure for a plain enumeration.
        if (!work_item->user_callback().is_null()) {
          int result = work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE ?
                       net::OK : net::ERR_FAILED;
          work_item->user_callback().Run(result);
        }
        return false;
      }
      DCHECK(!cells->empty());
    }

    // Consume cells from the back of the list until one still maps to a valid
    // entry cell in the index.
    while (!cells->empty()) {
      EntryCell last_cell = index_.FindEntryCell(cells->back().hash,
                                                 cells->back().address);
      cells->pop_back();
      if (!last_cell.IsValid())
        continue;

      entries.cells.push_back(last_cell);
      break;
    }

    if (!entries.cells.empty())
      break;
  }

  //+ see if the entry is currently open.

  work_item->set_entries(entries);
  PostWorkItem(work_item);
  return true;
}
| 1204 |
| 1205 void BackendImplV3::Doom(EntryImplV3* entry, WorkItem* work_item) { |
| 1206 if (entry->GetLastUsed() >= work_item->initial_time() && |
| 1207 entry->GetLastUsed() < work_item->end_time()) { |
| 1208 //+change the selected cell to open state |
| 1209 Trace("Doom 0x%p work item 0x%p", entry, work_item); |
| 1210 entry->Doom(); |
| 1211 } |
| 1212 entry->Close(); |
| 1213 } |
| 1214 |
| 1215 void BackendImplV3::UpdateIterator(EntryImplV3* entry, WorkItem* work_item) { |
| 1216 *work_item->iter_buffer() = work_item->ReleaseIterator(); |
| 1217 *work_item->entry_buffer() = entry; |
| 1218 work_item->user_callback().Run(net::OK); |
| 1219 } |
| 1220 |
| 1221 void BackendImplV3::AddStorageSize(int32 bytes) { |
| 1222 index_.header()->num_bytes += bytes; |
| 1223 DCHECK_GE(index_.header()->num_bytes, 0); |
| 1224 } |
| 1225 |
| 1226 void BackendImplV3::SubstractStorageSize(int32 bytes) { |
| 1227 index_.header()->num_bytes -= bytes; |
| 1228 DCHECK_GE(index_.header()->num_bytes, 0); |
| 1229 } |
| 1230 |
| 1231 void BackendImplV3::IncreaseNumRefs() { |
| 1232 num_refs_++; |
| 1233 if (max_refs_ < num_refs_) |
| 1234 max_refs_ = num_refs_; |
| 1235 } |
| 1236 |
| 1237 void BackendImplV3::DecreaseNumRefs() { |
| 1238 DCHECK(num_refs_); |
| 1239 num_refs_--; |
| 1240 } |
| 1241 |
| 1242 void BackendImplV3::IncreaseNumEntries() { |
| 1243 index_.header()->num_entries++; |
| 1244 DCHECK_GT(index_.header()->num_entries, 0); |
| 1245 } |
| 1246 |
| 1247 void BackendImplV3::DecreaseNumEntries() { |
| 1248 index_.header()->num_entries--; |
| 1249 if (index_.header()->num_entries < 0) { |
| 1250 NOTREACHED(); |
| 1251 index_.header()->num_entries = 0; |
| 1252 } |
| 1253 } |
| 1254 |
void BackendImplV3::PostWorkItem(WorkItem* work_item) {
  // Sends |work_item| to the worker on the cache thread. Becomes a no-op once
  // the worker has been released (see CleanupCache()).
  if (!worker_)
    return;
  Trace("Post task 0x%p %d flags 0x%x", work_item, work_item->type(),
        work_item->flags());

  // Long story short: we expect to see the work item back on this thread.
  // If the task is not executed we'll leak work_item, but that should only
  // happen at shutdown.
  work_item->AddRef();
  // OnWorkDone() balances the AddRef() above when the item comes back.
  work_item->set_closure(base::Bind(&BackendImplV3::OnWorkDone,
                                    ptr_factory_.GetWeakPtr()));
  cache_thread_->PostTask(
      FROM_HERE,
      base::Bind(&BackendImplV3::Worker::OnDoWork, worker_, work_item));
}
| 1271 |
void BackendImplV3::OnWorkDone(WorkItem* work_item) {
  // Runs on this thread when the worker finishes |work_item|; dispatches to
  // the completion handler that matches the item's type.
  Trace("Task done 0x%p %d flags 0x%x", work_item, work_item->type(),
        work_item->flags());
  // Balance the reference from PostWorkItem.
  scoped_refptr<WorkItem> my_work_item;
  my_work_item.swap(&work_item);

  if (!worker_) {
    // This may be called after CleanupForTest was called.
    if (!my_work_item->user_callback().is_null())
      my_work_item->user_callback().Run(my_work_item->result());
    return;
  }

  switch (my_work_item->type()) {
    case WorkItem::WORK_INIT: return OnInitComplete(my_work_item);
    case WorkItem::WORK_RESTART: return OnInitComplete(my_work_item);
    case WorkItem::WORK_GROW_INDEX: return OnGrowIndexComplete(my_work_item);
    case WorkItem::WORK_GROW_FILES: return OnGrowFilesComplete(my_work_item);
    case WorkItem::WORK_OPEN_ENTRY: return OnOpenEntryComplete(my_work_item);
    default: return OnOperationComplete(my_work_item);
  }
}
| 1295 |
| 1296 void BackendImplV3::OnInitComplete(WorkItem* work_item) { |
| 1297 int rv = work_item->result(); |
| 1298 if (rv != ERR_NO_ERROR && rv != ERR_CACHE_CREATED && |
| 1299 rv != ERR_PREVIOUS_CRASH) { |
| 1300 ReportError(rv); |
| 1301 return work_item->user_callback().Run(net::ERR_FAILED); |
| 1302 } |
| 1303 |
| 1304 #if defined(NET_BUILD_STRESS_CACHE) |
| 1305 // Start evictions right away. |
| 1306 up_ticks_ = kTrimDelay * 2; |
| 1307 #endif |
| 1308 DCHECK(!init_); |
| 1309 |
| 1310 num_refs_ = max_refs_ = 0; |
| 1311 entry_count_ = byte_count_ = 0; |
| 1312 |
| 1313 if (!restarted_) { |
| 1314 buffer_bytes_ = 0; |
| 1315 trace_object_ = TraceObject::GetTraceObject(); |
| 1316 // Create a recurrent timer of 30 secs. |
| 1317 int timer_delay = user_flags_ & UNIT_TEST_MODE ? 1000 : |
| 1318 kTimerSeconds * 1000; |
| 1319 timer_.reset(new base::RepeatingTimer<BackendImplV3>()); |
| 1320 timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this, |
| 1321 &BackendImplV3::OnStatsTimer); |
| 1322 } |
| 1323 Trace("Init"); |
| 1324 init_ = true; |
| 1325 |
| 1326 scoped_ptr<InitResult> result = work_item->init_result(); |
| 1327 index_.Init(result.get()); |
| 1328 |
| 1329 if (index_.header()->experiment != NO_EXPERIMENT && |
| 1330 cache_type_ != net::DISK_CACHE) { |
| 1331 // No experiment for other caches. |
| 1332 ReportError(ERR_INIT_FAILED); |
| 1333 return work_item->user_callback().Run(net::ERR_FAILED); |
| 1334 } |
| 1335 |
| 1336 if (!(user_flags_ & BASIC_UNIT_TEST)) { |
| 1337 // The unit test controls directly what to test. |
| 1338 lru_eviction_ = (cache_type_ != net::DISK_CACHE); |
| 1339 } |
| 1340 |
| 1341 if (!CheckIndex()) { |
| 1342 ReportError(ERR_INIT_FAILED); |
| 1343 return work_item->user_callback().Run(net::ERR_FAILED); |
| 1344 } |
| 1345 AdjustMaxCacheSize(); |
| 1346 |
| 1347 block_files_.Init(result->block_bitmaps); |
| 1348 |
| 1349 // We want to minimize the changes to cache for an AppCache. |
| 1350 if (cache_type() == net::APP_CACHE) { |
| 1351 DCHECK(lru_eviction_); |
| 1352 read_only_ = true; |
| 1353 } else if (cache_type() == net::SHADER_CACHE) { |
| 1354 DCHECK(lru_eviction_); |
| 1355 } |
| 1356 |
| 1357 eviction_.Init(this); |
| 1358 |
| 1359 int64 errors, full_dooms, partial_dooms, last_report; |
| 1360 errors = full_dooms = partial_dooms = last_report = 0; |
| 1361 if (work_item->type() == WorkItem::WORK_RESTART) { |
| 1362 int64 errors = stats_.GetCounter(Stats::FATAL_ERROR); |
| 1363 int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE); |
| 1364 int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT); |
| 1365 int64 last_report = stats_.GetCounter(Stats::LAST_REPORT); |
| 1366 } |
| 1367 |
| 1368 if (!InitStats(result->stats_data.get())) { |
| 1369 ReportError(ERR_INIT_FAILED); |
| 1370 return work_item->user_callback().Run(net::ERR_FAILED); |
| 1371 } |
| 1372 |
| 1373 disabled_ = false; |
| 1374 |
| 1375 #if defined(STRESS_CACHE_EXTENDED_VALIDATION) |
| 1376 trace_object_->EnableTracing(false); |
| 1377 int sc = SelfCheck(); |
| 1378 if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH) |
| 1379 NOTREACHED(); |
| 1380 trace_object_->EnableTracing(true); |
| 1381 #endif |
| 1382 |
| 1383 if (work_item->type() == WorkItem::WORK_RESTART) { |
| 1384 stats_.SetCounter(Stats::FATAL_ERROR, errors); |
| 1385 stats_.SetCounter(Stats::DOOM_CACHE, full_dooms); |
| 1386 stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms); |
| 1387 stats_.SetCounter(Stats::LAST_REPORT, last_report); |
| 1388 } |
| 1389 |
| 1390 ReportError(rv); |
| 1391 return work_item->user_callback().Run(net::OK); |
| 1392 } |
| 1393 |
| 1394 void BackendImplV3::OnGrowIndexComplete(WorkItem* work_item) { |
| 1395 if (work_item->result() != ERR_NO_ERROR || disabled_ || |
| 1396 (work_item->flags() & WorkItem::WORK_COMPLETE)) { |
| 1397 growing_index_ = false; |
| 1398 return; |
| 1399 } |
| 1400 |
| 1401 scoped_ptr<InitResult> result = work_item->init_result(); |
| 1402 index_.Init(result.get()); |
| 1403 work_item->set_flags(WorkItem::WORK_COMPLETE); |
| 1404 PostWorkItem(work_item); |
| 1405 } |
| 1406 |
| 1407 void BackendImplV3::OnGrowFilesComplete(WorkItem* work_item) { |
| 1408 if (work_item->result() != ERR_NO_ERROR || disabled_ || |
| 1409 (work_item->flags() & WorkItem::WORK_COMPLETE)) { |
| 1410 growing_files_ = false; |
| 1411 return; |
| 1412 } |
| 1413 |
| 1414 scoped_ptr<InitResult> result = work_item->init_result(); |
| 1415 block_files_.Init(result->block_bitmaps); |
| 1416 work_item->set_flags(WorkItem::WORK_COMPLETE); |
| 1417 PostWorkItem(work_item); |
| 1418 } |
| 1419 |
| 1420 void BackendImplV3::OnOperationComplete(WorkItem* work_item) { |
| 1421 if (work_item->result() < 0 && work_item->owner_entry()) { |
| 1422 // Make sure that there's a call to Close() after Doom(). |
| 1423 work_item->owner_entry()->AddRef(); |
| 1424 work_item->owner_entry()->Doom(); |
| 1425 work_item->owner_entry()->Close(); |
| 1426 } |
| 1427 |
| 1428 if (!work_item->user_callback().is_null()) |
| 1429 work_item->user_callback().Run(work_item->result()); |
| 1430 } |
| 1431 |
| 1432 |
void BackendImplV3::OnOpenEntryComplete(WorkItem* work_item) {
  // Completion handler for WORK_OPEN_ENTRY: routes to the specialized
  // handlers first, then finishes a regular open (or open-to-doom) for the
  // current candidate cell.
  Trace("Open complete");
  if (work_item->flags() & WorkItem::WORK_FOR_RESURRECT)
    return OnOpenForResurrectComplete(work_item);

  if (work_item->flags() & WorkItem::WORK_FOR_EVICT)
    return OnEvictEntryComplete(work_item);

  if (work_item->flags() & WorkItem::WORK_FOR_ITERATION)
    return OnOpenNextComplete(work_item);

  if (work_item->result() == ERR_NO_ERROR) {
    EntryImplV3* entry;
    int error = NewEntry(work_item, &entry);
    if (!error) {
      //+change the selected cell to open state

      if (work_item->flags() & WorkItem::WORK_FOR_DOOM) {
        // The caller just wanted the entry gone.
        entry->Doom();
        entry->Close();
      } else {
        eviction_.OnOpenEntry(entry);
        entry_count_++;
        if (work_item->flags() & WorkItem::WORK_FOR_UPDATE) {
          // Fire-and-forget open issued by OnExternalCacheHit(): refresh the
          // rank and return without running any callback.
          UpdateRank(entry, true);
          return;
        }
        *work_item->entry_buffer() = entry;

        Trace("Open hash 0x%x end: 0x%x", entry->GetHash(),
              entry->GetAddress().value());
        stats_.OnEvent(Stats::OPEN_HIT);
        SIMPLE_STATS_COUNTER("disk_cache.hit");
      }

      work_item->user_callback().Run(net::OK);
      return;
    }
  }

  // The current cell could not be opened; give up if it was the last one.
  if (work_item->entries()->current >= work_item->entries()->cells.size() - 1) {
    // Not found.
    work_item->user_callback().Run(net::ERR_FAILED);
    return;
  }

  //+post a task to delete the cell

  // Open the next entry on the list.
  work_item->entries()->current++;
  if (work_item->entries()->current < work_item->entries()->cells.size())
    PostWorkItem(work_item);
}
| 1486 |
| 1487 void BackendImplV3::OnOpenForResurrectComplete(WorkItem* work_item) { |
| 1488 if (work_item->result() == ERR_NO_ERROR) { |
| 1489 EntryImplV3* deleted_entry; |
| 1490 int error = NewEntry(work_item, &deleted_entry); |
| 1491 if (!error) { |
| 1492 scoped_ptr<ShortEntryRecord> entry_record = |
| 1493 deleted_entry->GetShortEntryRecord(); |
| 1494 CHECK(entry_record); |
| 1495 if (!entry_record) { |
| 1496 // This is an active entry. |
| 1497 deleted_entry->Close(); |
| 1498 stats_.OnEvent(Stats::CREATE_MISS); |
| 1499 Trace("create entry miss "); |
| 1500 work_item->user_callback().Run(net::ERR_FAILED);//doesn't make any sense |
| 1501 return; |
| 1502 } |
| 1503 |
| 1504 // We are attempting to create an entry and found out that the entry was |
| 1505 // previously deleted. |
| 1506 |
| 1507 stats_.OnEvent(Stats::RESURRECT_HIT); |
| 1508 Trace("Resurrect entry hit "); |
| 1509 deleted_entry->Doom(); |
| 1510 deleted_entry->Close(); |
| 1511 |
| 1512 int rv = |
| 1513 OnCreateEntryComplete(work_item->key(), deleted_entry->GetHash(), |
| 1514 entry_record.get(), work_item->entry_buffer(), |
| 1515 work_item->user_callback()); |
| 1516 DCHECK_EQ(rv, net::OK); |
| 1517 return; |
| 1518 } |
| 1519 } |
| 1520 |
| 1521 if (work_item->entries()->current >= work_item->entries()->cells.size()) { |
| 1522 // Not found. |
| 1523 work_item->user_callback().Run(net::ERR_FAILED); |
| 1524 return; |
| 1525 } |
| 1526 |
| 1527 //+post a task to delete the cell |
| 1528 |
| 1529 // Open the next entry on the list. |
| 1530 work_item->entries()->current++; |
| 1531 if (work_item->entries()->current < work_item->entries()->cells.size()) |
| 1532 PostWorkItem(work_item); |
| 1533 } |
| 1534 |
void BackendImplV3::OnEvictEntryComplete(WorkItem* work_item) {
  // Completion handler for the read issued while evicting an entry. In every
  // path, |eviction_| is told that the operation finished.
  if (work_item->result() != ERR_NO_ERROR)
    return eviction_.OnEvictEntryComplete();

  // cells[0] is the cell being evicted; it must still be in the index.
  EntryCell old_cell =
      index_.FindEntryCell(work_item->entries()->cells[0].hash(),
                           work_item->entries()->cells[0].GetAddress());
  DCHECK(old_cell.IsValid());

  if (!(work_item->flags() & WorkItem::WORK_NO_COPY)) {
    // The record was copied to a new location; cells[1] holds the new cell.
    EntryCell new_cell =
        index_.FindEntryCell(work_item->entries()->cells[1].hash(),
                             work_item->entries()->cells[1].GetAddress());
    DCHECK(new_cell.IsValid());
  }

  EntryImplV3* entry;
  int error = NewEntry(work_item, &entry);
  if (!error) {
    //+change the selected cell to open state
    entry->Doom();
    entry->Close();
  }

  //+delete old_cell after a timer (so add to deleted entries).

  eviction_.OnEvictEntryComplete();
}
| 1563 |
| 1564 void BackendImplV3::OnOpenNextComplete(WorkItem* work_item) { |
| 1565 Trace("OpenNext complete, work item 0x%p", work_item); |
| 1566 if (work_item->result() != ERR_NO_ERROR) { |
| 1567 OpenNext(work_item); |
| 1568 return; |
| 1569 } |
| 1570 |
| 1571 EntryImplV3* entry; |
| 1572 int error = NewEntry(work_item, &entry); |
| 1573 if (!error) { |
| 1574 if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) |
| 1575 Doom(entry, work_item); |
| 1576 else |
| 1577 return UpdateIterator(entry, work_item); |
| 1578 } |
| 1579 |
| 1580 // Grab another entry. |
| 1581 OpenNext(work_item); |
| 1582 } |
| 1583 |
int BackendImplV3::OnCreateEntryComplete(const std::string& key, uint32 hash,
                                         ShortEntryRecord* short_record,
                                         Entry** entry,
                                         const CompletionCallback& callback) {
  // Allocates the on-disk record and the in-memory object for a new entry
  // (or a resurrected one, when |short_record| is provided). Returns net::OK
  // on success and stores a referenced entry in |*entry|; net::ERR_FAILED
  // otherwise.
  // Create a new object in memory and return it to the caller.
  Addr entry_address;
  Trace("Create complete hash 0x%x", hash);
  if (!block_files_.CreateBlock(BLOCK_ENTRIES, 1, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return net::ERR_FAILED;
  }

  EntryCell cell = index_.CreateEntryCell(hash, entry_address);
  if (!cell.IsValid()) {
    // Roll back the block allocation.
    block_files_.DeleteBlock(entry_address);
    return net::ERR_FAILED;
  }

  scoped_refptr<EntryImplV3> cache_entry(
      new EntryImplV3(this, cell.GetAddress(), false));
  IncreaseNumRefs();

  cache_entry->CreateEntry(key, hash, short_record);
  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[cell.GetAddress().value()] = cache_entry;

  IncreaseNumEntries();
  entry_count_++;

  if (short_record)
    eviction_.OnResurrectEntry(cache_entry);
  else
    eviction_.OnCreateEntry(cache_entry);

  // NOTE(review): CREATE_HIT paired with the "disk_cache.miss" counter looks
  // intentional (a successful create is a cache miss from the caller's point
  // of view) — confirm against the v2 backend.
  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  cache_entry->AddRef();
  *entry = cache_entry.get();

  // Only the resurrect path runs the callback here; presumably the plain
  // create path is synchronous and relies on the net::OK return value.
  if (short_record)
    callback.Run(net::OK);

  return net::OK;
}
| 1632 |
| 1633 void BackendImplV3::LogStats() { |
| 1634 StatsItems stats; |
| 1635 GetStats(&stats); |
| 1636 |
| 1637 for (size_t index = 0; index < stats.size(); index++) |
| 1638 VLOG(1) << stats[index].first << ": " << stats[index].second; |
| 1639 } |
| 1640 |
void BackendImplV3::ReportStats() {
  // Reports periodic UMA metrics about the state of the cache.
  IndexHeaderV3* header = index_.header();
  CACHE_UMA(COUNTS, "Entries", 0, header->num_entries);

  int current_size = header->num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);

  CACHE_UMA(COUNTS_10000, "Size", 0, current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize", 0, max_size);
  if (!max_size)
    max_size++;  // Avoid dividing by zero for caches under 1 MB.
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  // The stats timer ticks every 30 seconds (see OnInitComplete()), so
  // TIMER / 120 converts ticks to hours.
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!(header->flags & CACHE_EVICTED)) {
    // Nothing has been evicted yet: only report total time.
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up to date client that will report FirstEviction() data. After
  // that event, start reporting this:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  // The remaining ratios divide by these values, so bail out when any of them
  // is zero.
  if (!use_hours || !GetEntryCount() || !header->num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = header->num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, header->num_entries);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / header->num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  // Per-group ratios only exist for the v3 eviction algorithm.
  if (!lru_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              header->num_no_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              header->num_low_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              header->num_high_use_entries * 100 / header->num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              header->num_evicted_entries * 100 / header->num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}
| 1723 |
| 1724 void BackendImplV3::ReportError(int error) { |
| 1725 STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH || |
| 1726 error == ERR_CACHE_CREATED); |
| 1727 |
| 1728 // We transmit positive numbers, instead of direct error codes. |
| 1729 DCHECK_LE(error, 0); |
| 1730 CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1); |
| 1731 } |
| 1732 |
bool BackendImplV3::CheckIndex() {
  // Consistency checks for the index, run during initialization. Most of the
  // v2 validation below is disabled pending its v3 rewrite; for now only the
  // eviction flag is consumed and the index is assumed to be healthy.
  if (index_.header()->flags & CACHE_EVICTION_2)
    lru_eviction_ = false;

  /*
  if (!index_.header()->table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(index_.header()->table_len) ||
      index_.header()->table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(index_.header()->table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (index_.header()->num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       index_.header()->num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (index_.header()->num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = index_.header()->table_len - 1;

  // Load the table into memory with a single read.
  scoped_array<char> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
  */

  // TODO: Make sure things look fine, and maybe scan the whole index if not.
  return true;
}
| 1776 |
int BackendImplV3::CheckAllEntries() {
  // Full-cache validation used by SelfCheck(). The v2 implementation below is
  // disabled pending its v3 rewrite; for now the walk reports zero dirty
  // entries.
  /*
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      EntryImplV3* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImplV3> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != index_.header()->num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
                  " " << index_.header()->num_entries;
    DCHECK_LT(num_entries, index_.header()->num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
  */
  return 0;
}
| 1822 |
// Verifies that a single entry's addresses and rankings data are internally
// consistent. The v2 implementation is kept below (commented out) as
// reference for the port; until it is ported, every entry is considered
// valid.
bool BackendImplV3::CheckEntry(EntryImplV3* cache_entry) {
  /*
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
  */
  // Stub: validation not yet implemented for v3.
  return true;
}
| 1840 |
| 1841 int BackendImplV3::MaxBuffersSize() { |
| 1842 static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory(); |
| 1843 static bool done = false; |
| 1844 |
| 1845 if (!done) { |
| 1846 const int kMaxBuffersSize = 30 * 1024 * 1024; |
| 1847 |
| 1848 // We want to use up to 2% of the computer's memory. |
| 1849 total_memory = total_memory * 2 / 100; |
| 1850 if (total_memory > kMaxBuffersSize || total_memory <= 0) |
| 1851 total_memory = kMaxBuffersSize; |
| 1852 |
| 1853 done = true; |
| 1854 } |
| 1855 |
| 1856 return static_cast<int>(total_memory); |
| 1857 } |
| 1858 |
| 1859 } // namespace disk_cache |
OLD | NEW |