Chromium Code Reviews

Unified Diff: net/disk_cache/v3/backend_impl_v3.cc

Issue 15203004: Disk cache: Reference CL for the implementation of file format version 3. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: IndexTable review Created 7 years, 1 month ago
Index: net/disk_cache/v3/backend_impl_v3.cc
===================================================================
--- net/disk_cache/v3/backend_impl_v3.cc (revision 0)
+++ net/disk_cache/v3/backend_impl_v3.cc (revision 0)
@@ -0,0 +1,1949 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/v3/backend_impl_v3.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/file_util.h"
+#include "base/files/file_path.h"
+#include "base/hash.h"
+#include "base/message_loop.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/stats_counters.h"
+#include "base/rand_util.h"
+#include "base/string_util.h"
+#include "base/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/threading/worker_pool.h"
+#include "base/time.h"
+#include "base/timer.h"
+#include "net/base/net_errors.h"
+#include "net/base/io_buffer.h"
+#include "net/disk_cache/errors.h"
+#include "net/disk_cache/experiments.h"
+#include "net/disk_cache/file.h"
+#include "net/disk_cache/storage_block-inl.h"
+#include "net/disk_cache/v3/backend_worker.h"
+#include "net/disk_cache/v3/backend_work_item.h"
+#include "net/disk_cache/v3/disk_format_v3.h"
+#include "net/disk_cache/v3/entry_impl_v3.h"
+#include "net/disk_cache/v3/index_table.h"
+
+// This has to be defined before including histogram_macros.h from this file.
+#define NET_DISK_CACHE_BACKEND_IMPL_CC_
+#include "net/disk_cache/v3/histogram_macros.h"
+
+using base::Time;
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace {
+
+// Seems like ~240 MB corresponds to less than 50k entries for 99% of users.
+// Note that the actual target is to keep the index table load factor under 55%
+// for most users.
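+// For example, with these constants a 500 MB cache falls into the third
+// bucket of DesiredIndexTableLen() below and gets a table of
+// kBaseTableLen * 4 (256K cells); MaxStorageSizeForTable() is the inverse
+// mapping, so that table is considered enough for roughly 960 MB of storage.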
+const int k64kEntriesStore = 240 * 1000 * 1000;
+const int kBaseTableLen = 64 * 1024;
+const int kDefaultCacheSize = 80 * 1024 * 1024;
+
+// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
+const int kTrimDelay = 10;
+const int kTimerSeconds = 30;
+
+const size_t kMaxKeySize = 64 * 1024;
+
+int DesiredIndexTableLen(int32 storage_size) {
+ if (storage_size <= k64kEntriesStore)
+ return kBaseTableLen;
+ if (storage_size <= k64kEntriesStore * 2)
+ return kBaseTableLen * 2;
+ if (storage_size <= k64kEntriesStore * 4)
+ return kBaseTableLen * 4;
+ if (storage_size <= k64kEntriesStore * 8)
+ return kBaseTableLen * 8;
+
+ // The biggest storage_size for int32 requires a 4 MB table.
+ return kBaseTableLen * 16;
+}
+
+int MaxStorageSizeForTable(int table_len) {
+ return table_len * (k64kEntriesStore / kBaseTableLen);
+}
+
+size_t GetIndexBitmapSize(int table_len) {
+ DCHECK_LT(table_len, 1 << 22);
+ size_t base_bits = disk_cache::kBaseBitmapBytes * 8;
+ if (table_len < static_cast<int>(base_bits))
+ return sizeof(disk_cache::IndexBitmap);
+
+ size_t num_pages = (table_len / 8) - disk_cache::kBaseBitmapBytes;
+ num_pages = (num_pages + 4095) / 4096;
+ return sizeof(disk_cache::IndexHeaderV3) + num_pages * 4096;
+}
+
+} // namespace
+
+// ------------------------------------------------------------------------
+
+namespace disk_cache {
+
+// Exported by disk_cache/backend_impl.cc
+// Returns the preferred max cache size given the available disk space.
+NET_EXPORT_PRIVATE int PreferedCacheSize(int64 available);
+
+BackendImplV3::BackendImplV3(const base::FilePath& path,
+ base::MessageLoopProxy* cache_thread,
+ net::NetLog* net_log)
+ : index_(this),
+ path_(path),
+ block_files_(this),
+ max_size_(0),
+ up_ticks_(0),
+ test_seconds_(0),
+ cache_type_(net::DISK_CACHE),
+ uma_report_(0),
+ user_flags_(0),
+ init_(false),
+ restarted_(false),
+ read_only_(false),
+ disabled_(false),
+ lru_eviction_(true),
+ first_timer_(true),
+ user_load_(false),
+ growing_index_(false),
+ growing_files_(false),
+ net_log_(net_log),
+ cache_thread_(cache_thread),
+ ptr_factory_(this) {
+}
+
+BackendImplV3::~BackendImplV3() {
+ CleanupCache();
+}
+
+int BackendImplV3::Init(const CompletionCallback& callback) {
+ DCHECK(!init_);
+ if (init_)
+ return net::ERR_FAILED;
+
+ worker_ = new Worker(path_, base::MessageLoopProxy::current());
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_INIT);
+ work_item->set_user_callback(callback);
+ work_item->set_flags(user_flags_);
+ PostWorkItem(work_item);
+
+ return net::ERR_IO_PENDING;
+}
+
+// ------------------------------------------------------------------------
+
+int BackendImplV3::OpenPrevEntry(void** iter, Entry** prev_entry,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ return OpenFollowingEntry(true, iter, prev_entry, callback);
+}
+
+bool BackendImplV3::SetMaxSize(int max_bytes) {
+ COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
+ if (max_bytes < 0)
+ return false;
+
+ // Zero size means use the default.
+ if (!max_bytes)
+ return true;
+
+ // Avoid a DCHECK later on.
+ if (max_bytes >= kint32max - kint32max / 10)
+ max_bytes = kint32max - kint32max / 10 - 1;
+
+ user_flags_ |= MAX_SIZE;
+ max_size_ = max_bytes;
+ return true;
+}
+
+void BackendImplV3::SetType(net::CacheType type) {
+ DCHECK_NE(net::MEMORY_CACHE, type);
+ cache_type_ = type;
+}
+
+bool BackendImplV3::CreateBlock(FileType block_type, int block_count,
+ Addr* block_address) {
+ return block_files_.CreateBlock(block_type, block_count, block_address);
+}
+
+void BackendImplV3::UpdateRank(EntryImplV3* entry, bool modified) {
+ if (!modified && (cache_type() == net::SHADER_CACHE || read_only_))
+ return;
+
+ index_.UpdateTime(entry->GetHash(), entry->GetAddress(), GetTime());
+}
+
+void BackendImplV3::InternalDoomEntry(EntryImplV3* entry) {
+ uint32 hash = entry->GetHash();
+ std::string key = entry->GetKey();
+ Addr entry_addr = entry->GetAddress();
+
+ Trace("Doom entry 0x%p", entry);
+
+ index_.SetSate(hash, entry_addr, ENTRY_DELETED);
+
+ // The entry is transitioning from open to doomed.
+ doomed_entries_[entry_addr.value()] = entry;
+ EntriesMap::iterator it = open_entries_.find(entry_addr.value());
+ if (it != open_entries_.end())
+ open_entries_.erase(it);
+ else
+ NOTREACHED();
+
+ entry->InternalDoom();
+ DecreaseNumEntries();
+}
+
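+// A doomed entry is released in two passes: the first call parks the entry in
+// entries_to_delete_ (taking an extra reference and re-opening it) so nothing
+// is deleted before the next backup cycle; when CloseDoomedEntries() runs on
+// the timer tick and closes it again, a second call to this method finds the
+// entry in the map and returns true, allowing the actual deletion.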
+bool BackendImplV3::ShouldDeleteNow(EntryImplV3* entry) {
+ Addr entry_addr = entry->GetAddress();
+ DCHECK(doomed_entries_.count(entry_addr.value()));
+ EntriesMap::iterator it = entries_to_delete_.find(entry_addr.value());
+ if (it == entries_to_delete_.end()) {
+ // Delay deletion until the next backup cycle.
+ entries_to_delete_[entry_addr.value()] = entry;
+ entry->AddRef();
+
+ // The entry was ready to be deleted. By opening it again we make sure we go
+ // through the normal Close() logic again later on, so we get a second chance
+ // to allow the deletion.
+ entry->OnOpenEntry();
+ return false;
+ }
+
+ entries_to_delete_.erase(it);
+ return true;
+}
+
+void BackendImplV3::OnEntryCleanup(EntryImplV3* entry) {
+ // An entry may be going away pretty soon (as soon as all pending IO is done).
+ // Grab an extra reference so that the entry is alive for a little longer and
+ // we may reuse it directly.
+ if (recent_entries_.insert(entry).second)
+ entry->AddRef();
+}
+
+void BackendImplV3::OnEntryDestroyBegin(Addr address) {
+ if (disabled_)
+ return;
+ EntriesMap::iterator it = open_entries_.find(address.value());
+ if (it != open_entries_.end()) {
+ index_.SetSate(it->second->GetHash(), address, ENTRY_USED);
+ open_entries_.erase(it);
+ } else {
+ it = doomed_entries_.find(address.value());
+ if (it != doomed_entries_.end()) {
+ // All data is gone. Wait for the next backup cycle before releasing the
+ // cell itself.
+ CellInfo cell_info = { it->second->GetHash(), address };
+ deleted_entries_.push_back(cell_info);
+ doomed_entries_.erase(it);
+ }
+ }
+}
+
+void BackendImplV3::OnEntryDestroyEnd() {
+ DecreaseNumRefs();
+ if (disabled_)
+ return;
+ if (index_.header()->num_bytes > max_size_ && !read_only_ &&
+ (up_ticks_ > kTrimDelay || user_flags_ & BASIC_UNIT_TEST)) {
+ eviction_.TrimCache();
+ }
+}
+
+void BackendImplV3::OnEntryModified(EntryImplV3* entry) {
+ index_.SetSate(entry->GetHash(), entry->GetAddress(), ENTRY_MODIFIED);
+}
+
+void BackendImplV3::ReadData(EntryImplV3* entry, Addr address, int offset,
+ net::IOBuffer* buffer, int buffer_len,
+ const CompletionCallback& callback) {
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_READ_DATA);
+ work_item->set_buffer(buffer);
+ work_item->set_buffer_len(buffer_len);
+ work_item->set_address(address);
+ work_item->set_offset(offset);
+ work_item->set_user_callback(callback);
+ if (entry)
+ work_item->set_owner_entry(entry);
+
+ PostWorkItem(work_item);
+}
+
+void BackendImplV3::WriteData(EntryImplV3* entry, Addr address, int offset,
+ net::IOBuffer* buffer, int buffer_len,
+ const CompletionCallback& callback) {
+ if (!buffer_len) {
+ DCHECK(callback.is_null());
+ return;
+ }
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_WRITE_DATA);
+ work_item->set_buffer(buffer);
+ work_item->set_buffer_len(buffer_len);
+ work_item->set_address(address);
+ work_item->set_offset(offset);
+ work_item->set_user_callback(callback);
+ work_item->set_owner_entry(entry);
+ PostWorkItem(work_item);
+}
+
+void BackendImplV3::MoveData(EntryImplV3* entry, Addr source,
+ Addr destination, int len,
+ const CompletionCallback& callback) {
+ DCHECK(len);
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_MOVE_DATA);
+ work_item->set_buffer_len(len);
+ work_item->set_address(source);
+ work_item->set_address2(destination);
+ work_item->set_user_callback(callback);
+ work_item->set_owner_entry(entry);
+ PostWorkItem(work_item);  // TODO: also delete the source data.
+}
+
+void BackendImplV3::Truncate(EntryImplV3* entry, Addr address, int offset) {
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_TRUNCATE);
+ work_item->set_address(address);
+ work_item->set_offset(offset);
+ work_item->set_owner_entry(entry);
+ PostWorkItem(work_item);
+}
+
+void BackendImplV3::Delete(EntryImplV3* entry, Addr address) {
+ if (disabled_)
+ return;
+ if (address.is_separate_file()) {
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_DELETE);
+ work_item->set_address(address);
+ work_item->set_owner_entry(entry);
+ PostWorkItem(work_item);
+
+ // And now delete the block itself.
+ address = address.AsBlockFile();
+ }
+
+ int size = Addr::BlockSizeForFileType(address.file_type());
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
+ memset(buffer->data(), 0, size);
+ WriteData(entry, address, 0, buffer, size, net::CompletionCallback());
+
+ block_files_.DeleteBlock(address);
+}
+
+void BackendImplV3::Close(EntryImplV3* entry, Addr address) {
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLOSE);
+ work_item->set_address(address);
+ work_item->set_owner_entry(entry);
+ PostWorkItem(work_item);
+}
+
+bool BackendImplV3::EvictEntry(uint32 hash, Addr address) {
+ EntriesMap::iterator it = open_entries_.find(address.value());
+ if (it != open_entries_.end())
+ return false;
+
+ EntryCell old_cell = index_.FindEntryCell(hash, address);
+ if (!old_cell.IsValid() || old_cell.GetState() != ENTRY_USED)
+ return false;
+
+ EntrySet entries;
+ entries.cells.push_back(old_cell);
+
+ uint32 flags = WorkItem::WORK_FOR_EVICT;
+ if (lru_eviction_) {
+ flags |= WorkItem::WORK_NO_COPY;
+ } else {
+ Addr new_address;
+ if (!block_files_.CreateBlock(BLOCK_EVICTED, 1, &new_address))
+ return false;
+
+ EntryCell new_cell = index_.CreateEntryCell(hash, new_address);
+ if (!new_cell.IsValid()) {
+ block_files_.DeleteBlock(new_address);
+ return false;
+ }
+ entries.cells.push_back(new_cell);
+ }
+
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
+ work_item->set_flags(flags);
+ work_item->set_entries(entries);
+ PostWorkItem(work_item);
+
+ return true;
+}
+
+EntryImplV3* BackendImplV3::GetOpenEntry(Addr address) const {
+ EntriesMap::const_iterator it = open_entries_.find(address.value());
+ if (it != open_entries_.end()) {
+ // We have this entry in memory.
+ it->second->AddRef();
+ it->second->OnOpenEntry();
+ return it->second;
+ }
+
+ return NULL;
+}
+
+int BackendImplV3::MaxFileSize() const {
+ return max_size_ / 8;
+}
+
+void BackendImplV3::ModifyStorageSize(int32 old_size, int32 new_size) {
+ if (disabled_ || old_size == new_size)
+ return;
+ if (old_size > new_size)
+ SubstractStorageSize(old_size - new_size);
+ else
+ AddStorageSize(new_size - old_size);
+
+ // Update the usage statistics.
+ stats_.ModifyStorageStats(old_size, new_size);
+}
+
+void BackendImplV3::TooMuchStorageRequested(int32 size) {
+ stats_.ModifyStorageStats(0, size);
+}
+
+bool BackendImplV3::IsAllocAllowed(int current_size, int new_size, bool force) {
+ DCHECK_GT(new_size, current_size);
+ if (!force && (user_flags_ & NO_BUFFERING))
+ return false;
+
+ int to_add = new_size - current_size;
+ if (!force && (buffer_bytes_ + to_add > MaxBuffersSize()))
+ return false;
+
+ buffer_bytes_ += to_add;
+ CACHE_UMA(COUNTS_50000, "BufferBytes", buffer_bytes_ / 1024);
+ return true;
+}
+
+void BackendImplV3::BufferDeleted(int size) {
+ DCHECK_GE(size, 0);
+ buffer_bytes_ -= size;
+ DCHECK_GE(buffer_bytes_, 0);
+}
+
+bool BackendImplV3::IsLoaded() const {
+ if (user_flags_ & NO_LOAD_PROTECTION)
+ return false;
+
+ return user_load_;
+}
+
+base::Time BackendImplV3::GetTime() const {
+ Time base_time = Time::Now();
+ if (!test_seconds_)
+ return base_time;
+
+ return base_time + TimeDelta::FromSeconds(test_seconds_);
+}
+
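+// Builds a histogram name qualified by the cache type (names[] is indexed by
+// net::CacheType); e.g. for a DISK_CACHE instance, HistogramName("Size")
+// produces "DiskCache3.Size_Http".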
+std::string BackendImplV3::HistogramName(const char* name) const {
+ static const char* names[] = { "Http", "", "Media", "AppCache", "Shader" };
+ DCHECK_NE(cache_type_, net::MEMORY_CACHE);
+ return base::StringPrintf("DiskCache3.%s_%s", name, names[cache_type_]);
+}
+
+base::WeakPtr<BackendImplV3> BackendImplV3::GetWeakPtr() {
+ return ptr_factory_.GetWeakPtr();
+}
+
+// We want to remove biases from some histograms so we only send data once per
+// week.
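+// uma_report_ caches the decision for this session: 0 means it has not been
+// evaluated yet, 1 means reporting was evaluated and skipped, and 2 means
+// histograms should be reported. The weekly check itself relies on the
+// Stats::LAST_REPORT counter.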
+bool BackendImplV3::ShouldReportAgain() {
+ if (uma_report_)
+ return uma_report_ == 2;
+
+ uma_report_++;
+ int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
+ Time last_time = Time::FromInternalValue(last_report);
+ if (!last_report || (GetTime() - last_time).InDays() >= 7) {
+ stats_.SetCounter(Stats::LAST_REPORT, GetTime().ToInternalValue());
+ uma_report_++;
+ return true;
+ }
+ return false;
+}
+
+void BackendImplV3::FirstEviction() {
+ IndexHeaderV3* header = index_.header();
+ header->flags |= CACHE_EVICTED;
+ DCHECK(header->create_time);
+ if (!GetEntryCount())
+ return; // This is just for unit tests.
+
+ Time create_time = Time::FromInternalValue(header->create_time);
+ CACHE_UMA(AGE, "FillupAge", create_time);
+
+ int64 use_time = stats_.GetCounter(Stats::TIMER);
+ CACHE_UMA(HOURS, "FillupTime", static_cast<int>(use_time / 120));
+ CACHE_UMA(PERCENTAGE, "FirstHitRatio", stats_.GetHitRatio());
+
+ if (!use_time)
+ use_time = 1;
+ CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate",
+ static_cast<int>(header->num_entries / use_time));
+ CACHE_UMA(COUNTS, "FirstByteIORate",
+ static_cast<int>((header->num_bytes / 1024) / use_time));
+
+ int avg_size = header->num_bytes / GetEntryCount();
+ CACHE_UMA(COUNTS, "FirstEntrySize", avg_size);
+
+ int large_entries_bytes = stats_.GetLargeEntriesSize();
+ int large_ratio = large_entries_bytes * 100 / header->num_bytes;
+ CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", large_ratio);
+
+ if (!lru_eviction_) {
+ CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", stats_.GetResurrectRatio());
+ CACHE_UMA(PERCENTAGE, "FirstNoUseRatio",
+ header->num_no_use_entries * 100 / header->num_entries);
+ CACHE_UMA(PERCENTAGE, "FirstLowUseRatio",
+ header->num_low_use_entries * 100 / header->num_entries);
+ CACHE_UMA(PERCENTAGE, "FirstHighUseRatio",
+ header->num_high_use_entries * 100 / header->num_entries);
+ }
+
+ stats_.ResetRatios();
+}
+
+void BackendImplV3::OnEvent(Stats::Counters an_event) {
+ stats_.OnEvent(an_event);
+}
+
+void BackendImplV3::OnRead(int32 bytes) {
+ DCHECK_GE(bytes, 0);
+ byte_count_ += bytes;
+ if (byte_count_ < 0)
+ byte_count_ = kint32max;
+}
+
+void BackendImplV3::OnWrite(int32 bytes) {
+ // We use the same implementation as OnRead... just log the number of bytes.
+ OnRead(bytes);
+}
+
+void BackendImplV3::GrowBlockFiles() {
+ if (growing_files_ || disabled_)
+ return;
+ growing_files_ = true;
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_GROW_FILES);
+ PostWorkItem(work_item);
+}
+
+void BackendImplV3::OnTimerTick() {
+ if (disabled_)
+ return;
+
+ stats_.OnEvent(Stats::TIMER);
+ int64 time = stats_.GetCounter(Stats::TIMER);
+ int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);
+
+ // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
+ // the bias towards 0.
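+ // Each tick moves the stored value 1/50th of the way towards num_refs_ (or
+ // by at least one); for example, with num_refs_ = 150 and a stored value of
+ // 100, the counter becomes 101 on this tick.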
+ if (num_refs_ && (current != num_refs_)) {
+ int64 diff = (num_refs_ - current) / 50;
+ if (!diff)
+ diff = num_refs_ > current ? 1 : -1;
+ current = current + diff;
+ stats_.SetCounter(Stats::OPEN_ENTRIES, current);
+ stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
+ }
+
+ CACHE_UMA(COUNTS, "NumberOfReferences", num_refs_);
+
+ CACHE_UMA(COUNTS_10000, "EntryAccessRate", entry_count_);
+ CACHE_UMA(COUNTS, "ByteIORate", byte_count_ / 1024);
+
+ // These values cover about 99.5% of the population (Oct 2011).
+ user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
+ entry_count_ = 0;
+ byte_count_ = 0;
+ up_ticks_++;
+
+ if (first_timer_) {
+ first_timer_ = false;
+ if (ShouldReportAgain())
+ ReportStats();
+ }
+
+ index_.OnBackupTimer();
+ CloseDoomedEntries();
+ ReleaseRecentEntries();
+ UpdateDeletedEntries();
+
+ // Save stats to disk at 5 min intervals.
+ if (time % 10 == 0)
+ StoreStats();
+}
+
+void BackendImplV3::SetUnitTestMode() {
+ user_flags_ |= UNIT_TEST_MODE;
+}
+
+void BackendImplV3::SetUpgradeMode() {
+ user_flags_ |= UPGRADE_MODE;
+ read_only_ = true;
+}
+
+void BackendImplV3::SetNewEviction() {
+ user_flags_ |= EVICTION_V2;
+ lru_eviction_ = false;
+}
+
+void BackendImplV3::SetFlags(uint32 flags) {
+ user_flags_ |= flags;
+}
+
+int BackendImplV3::FlushQueueForTest(const CompletionCallback& callback) {
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_NONE);
+ work_item->set_user_callback(callback);
+ PostWorkItem(work_item);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImplV3::CleanupForTest(const CompletionCallback& callback) {
+ CloseDoomedEntries();
+ ReleaseRecentEntries();
+ UpdateDeletedEntries();
+ index_.OnBackupTimer();
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLEANUP);
+ work_item->set_user_callback(callback);
+ PostWorkItem(work_item);
+ worker_ = NULL;
+ init_ = false;
+ disabled_ = true;
+ index_.Reset();
+ return net::ERR_IO_PENDING;
+}
+
+void BackendImplV3::TrimForTest(bool empty) {
+ eviction_.SetTestMode();
+ if (empty)
+ eviction_.TrimAllCache(CompletionCallback());
+ else
+ eviction_.TrimCache();
+}
+
+void BackendImplV3::TrimDeletedListForTest(bool empty) {
+ eviction_.SetTestMode();
+ eviction_.TrimDeletedList(empty);
+}
+
+void BackendImplV3::AddDelayForTest(int seconds) {
+ Trace("Add %d deconds", seconds);
+ int old_timers = test_seconds_ / kTimerSeconds;
+ test_seconds_ += seconds;
+ if (old_timers != test_seconds_ / kTimerSeconds)
+ OnTimerTick();
+}
+
+int BackendImplV3::WaitForEntryToCloseForTest(
+ const std::string& key,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ if (disabled_ || key.empty())
+ return net::ERR_FAILED;
+
+ uint32 hash = base::Hash(key);
+
+ EntrySet entries = index_.LookupEntries(hash);
+ if (!entries.cells.size())
+ return net::OK;
+
+ if (entries.cells.size() == static_cast<size_t>(entries.evicted_count))
+ return net::OK;
+
+ EntryImplV3* open_entry = LookupOpenEntry(entries, key);
+ if (open_entry) {
+ open_entry->NotifyDestructionForTest(callback);
+ open_entry->Close();
+ return net::ERR_IO_PENDING;
+ }
+
+ return net::OK;
+}
+
+int BackendImplV3::SelfCheck() {
+ if (!init_) {
+ LOG(ERROR) << "Init failed";
+ return ERR_INIT_FAILED;
+ }
+
+ /*int num_entries = rankings_.SelfCheck();
+ if (num_entries < 0) {
+ LOG(ERROR) << "Invalid rankings list, error " << num_entries;
+#if !defined(NET_BUILD_STRESS_CACHE)
+ return num_entries;
+#endif
+ }
+
+ if (num_entries != index_.header()->num_entries) {
+ LOG(ERROR) << "Number of entries mismatch";
+#if !defined(NET_BUILD_STRESS_CACHE)
+ return ERR_NUM_ENTRIES_MISMATCH;
+#endif
+ }*/
+
+ return CheckAllEntries();
+}
+
+void BackendImplV3::GrowIndex() {
+ if (growing_index_ || disabled_)
+ return;
+ growing_index_ = true;
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_GROW_INDEX);
+ PostWorkItem(work_item);
+}
+
+void BackendImplV3::SaveIndex(net::IOBuffer* buffer, int buffer_len) {
+ if (disabled_ || !buffer_len)
+ return;
+
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_WRITE_INDEX);
+ work_item->set_buffer(buffer);
+ work_item->set_buffer_len(buffer_len);
+ work_item->set_offset(0);
+ PostWorkItem(work_item);
+}
+
+void BackendImplV3::DeleteCell(EntryCell cell) {
+ NOTREACHED();
+ // Post task to delete this cell.
+ // look at a local map of cells being deleted.
+}
+
+void BackendImplV3::FixCell(EntryCell cell) {
+ NOTREACHED();
+}
+
+// ------------------------------------------------------------------------
+
+net::CacheType BackendImplV3::GetCacheType() const {
+ return cache_type_;
+}
+
+int32 BackendImplV3::GetEntryCount() const {
+ if (disabled_)
+ return 0;
+ DCHECK(init_);
+ return index_.header()->num_entries;
+}
+
+int BackendImplV3::OpenEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ if (disabled_ || key.empty())
+ return net::ERR_FAILED;
+
+ uint32 hash = base::Hash(key);
+ Trace("Open hash 0x%x", hash);
+
+ EntrySet entries = index_.LookupEntries(hash);
+ if (!entries.cells.size())
+ return net::ERR_FAILED;
+
+ if (entries.cells.size() == static_cast<size_t>(entries.evicted_count))
+ return net::ERR_FAILED;
+
+ EntryImplV3* open_entry = LookupOpenEntry(entries, key);
+ if (open_entry) {
+ *entry = open_entry;
+ eviction_.OnOpenEntry(open_entry);
+ entry_count_++;
+
+ Trace("Open hash 0x%x end: 0x%x", hash, open_entry->GetAddress().value());
+ stats_.OnEvent(Stats::OPEN_HIT);
+ SIMPLE_STATS_COUNTER("disk_cache.hit");
+ return net::OK;
+ }
+
+ // Read the entry from disk.
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
+ work_item->set_entries(entries);
+ work_item->set_user_callback(callback);
+ work_item->set_key(key);
+ work_item->set_entry_buffer(entry);
+ PostWorkItem(work_item);
+
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImplV3::CreateEntry(const std::string& key, Entry** entry,
+ const CompletionCallback& callback) {
+ DCHECK(init_);
+ DCHECK(!callback.is_null());
+ if (disabled_ || key.empty() || key.size() > kMaxKeySize)
+ return net::ERR_FAILED;
+
+ uint32 hash = base::Hash(key);
+ Trace("Create hash 0x%x", hash);
+
+ EntrySet entries = index_.LookupEntries(hash);
+ if (entries.cells.size()) {
+ if (entries.cells.size() != static_cast<size_t>(entries.evicted_count)) {
+ // An entry with this hash already exists, but it could just be a hash
+ // collision. TODO: post a work item to check the key here, and keep a
+ // collision-specific map.
+ return net::ERR_FAILED;
+ }
+
+ // On the other hand, we have only deleted items that we may resurrect.
+ // Read the entry from disk.
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
+ work_item->set_flags(WorkItem::WORK_FOR_RESURRECT);
+ work_item->set_entries(entries);
+ work_item->set_user_callback(callback);
+ work_item->set_key(key);
+ work_item->set_entry_buffer(entry);
+ PostWorkItem(work_item);
+
+ return net::ERR_IO_PENDING;
+ }
+ return OnCreateEntryComplete(key, hash, NULL, entry, callback);
+}
+
+int BackendImplV3::DoomEntry(const std::string& key,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ if (disabled_ || key.empty())
+ return net::ERR_FAILED;
+
+ uint32 hash = base::Hash(key);
+ Trace("DoomEntry hash 0x%x", hash);
+
+ EntrySet entries = index_.LookupEntries(hash);
+ if (!entries.cells.size())
+ return net::ERR_FAILED;
+
+ if (entries.cells.size() == static_cast<size_t>(entries.evicted_count))
+ return net::ERR_FAILED;
+
+ EntryImplV3* open_entry = LookupOpenEntry(entries, key);
+ if (open_entry) {
+ open_entry->Doom();
+ open_entry->Close();
+ return net::OK;
+ }
+
+ // Read the entry from disk.
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
+ work_item->set_flags(WorkItem::WORK_FOR_DOOM);
+ work_item->set_entries(entries);
+ work_item->set_user_callback(callback);
+ work_item->set_key(key);
+ PostWorkItem(work_item);
+
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImplV3::DoomAllEntries(const CompletionCallback& callback) {
+ if (disabled_)
+ return net::ERR_FAILED;
+
+ // This is not really an error, but it is an interesting condition.
+ ReportError(ERR_CACHE_DOOMED);
+ stats_.OnEvent(Stats::DOOM_CACHE);
+ if (!num_refs_) {
+ RestartCache(callback);
+ return init_ ? net::OK : net::ERR_IO_PENDING;
+ }
+ return eviction_.TrimAllCache(callback);
+}
+
+int BackendImplV3::DoomEntriesBetween(base::Time initial_time,
+ base::Time end_time,
+ const CompletionCallback& callback) {
+ DCHECK_NE(net::APP_CACHE, cache_type_);
+ Time now = GetTime();
+ if (end_time.is_null() || end_time > now)
+ end_time = now;
+
+ DCHECK(end_time >= initial_time);
+
+ if (disabled_)
+ return net::ERR_FAILED;
+
+ scoped_ptr<IndexIterator> to_delete(new IndexIterator);
+ to_delete->forward = false;
+ to_delete->timestamp = index_.CalculateTimestamp(end_time) + 1;
+
+ // Prepare to read the first entry from disk.
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
+ work_item->set_flags(WorkItem::WORK_FOR_ITERATION |
+ WorkItem::WORK_FOR_DOOM_RANGE);
+ work_item->set_initial_time(initial_time);
+ work_item->set_end_time(end_time);
+ work_item->set_iterator(to_delete.Pass());
+
+ if (OpenNext(work_item) != net::ERR_IO_PENDING)
+ return net::OK;
+
+ work_item->set_user_callback(callback);
+ return net::ERR_IO_PENDING;
+}
+
+int BackendImplV3::DoomEntriesSince(base::Time initial_time,
+ const CompletionCallback& callback) {
+ DCHECK_NE(net::APP_CACHE, cache_type_);
+ return DoomEntriesBetween(initial_time, GetTime(), callback);
+}
+
+int BackendImplV3::OpenNextEntry(void** iter, Entry** next_entry,
+ const CompletionCallback& callback) {
+ DCHECK(!callback.is_null());
+ return OpenFollowingEntry(false, iter, next_entry, callback);
+}
+
+void BackendImplV3::EndEnumeration(void** iter) {
+ scoped_ptr<IndexIterator> iterator(
+ reinterpret_cast<IndexIterator*>(*iter));
+ *iter = NULL;
+}
+
+void BackendImplV3::GetStats(StatsItems* stats) {
+ if (disabled_)
+ return;
+
+ std::pair<std::string, std::string> item;
+
+ item.first = "Entries";
+ item.second = base::StringPrintf("%d", index_.header()->num_entries);
+ stats->push_back(item);
+
+ item.first = "Max size";
+ item.second = base::StringPrintf("%d", max_size_);
+ stats->push_back(item);
+
+ item.first = "Current size";
+ item.second = base::StringPrintf("%d", index_.header()->num_bytes);
+ stats->push_back(item);
+
+ stats_.GetItems(stats);
+}
+
+void BackendImplV3::OnExternalCacheHit(const std::string& key) {
+ if (disabled_ || key.empty())
+ return;
+
+ uint32 hash = base::Hash(key);
+ EntrySet entries = index_.LookupEntries(hash);
+ if (!entries.cells.size())
+ return;
+
+ if (entries.cells.size() == static_cast<size_t>(entries.evicted_count))
+ return;
+
+ for (size_t i = 0; i < entries.cells.size(); i++) {
+ if (entries.cells[i].GetGroup() == ENTRY_EVICTED)
+ continue;
+
+ index_.UpdateTime(hash, entries.cells[i].GetAddress(), GetTime());
+ }
+
+ EntryImplV3* open_entry = LookupOpenEntry(entries, key);
+ if (open_entry) {
+ eviction_.OnOpenEntry(open_entry);
+ entry_count_++;
+ UpdateRank(open_entry, true);
+ open_entry->Close();
+ return;
+ }
+
+ if (user_flags_ & UNIT_TEST_MODE) {
+ for (size_t i = 0; i < entries.cells.size(); i++) {
+ // This method doesn't have a callback, and it may take a while for the
+ // operation to complete so update the time of any entry with this hash.
+ if (entries.cells[i].GetGroup() != ENTRY_EVICTED) {
+ index_.UpdateTime(hash, entries.cells[i].GetAddress(),
+ GetTime());
+ }
+ }
+ }
+
+ // Read the entry from disk.
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
+ work_item->set_flags(WorkItem::WORK_FOR_UPDATE);
+ work_item->set_entries(entries);
+ work_item->set_key(key);
+ PostWorkItem(work_item);
+}
+
+// ------------------------------------------------------------------------
+
+// The maximum cache size will be either set explicitly by the caller, or
+// calculated by this code.
+void BackendImplV3::AdjustMaxCacheSize() {
+ if (max_size_)
+ return;
+
+ // The user is not setting the size, let's figure it out.
+ int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
+ if (available < 0) {
+ max_size_ = kDefaultCacheSize;
+ return;
+ }
+
+ available += index_.header()->num_bytes;
+
+ max_size_ = PreferedCacheSize(available);
+
+ // Let's not use more than the default size while we tune up the performance
+ // of bigger caches. TODO(rvargas): remove this limit.
+ if (max_size_ > kDefaultCacheSize * 4)
+ max_size_ = kDefaultCacheSize * 4;
+}
+
+bool BackendImplV3::InitStats(void* stats_data) {
+ Addr address(index_.header()->stats);
+ int size = stats_.StorageSize();
+
+ if (!address.is_initialized()) {
+ FileType file_type = Addr::RequiredFileType(size);
+ DCHECK_NE(file_type, EXTERNAL);
+ int num_blocks = Addr::RequiredBlocks(size, file_type);
+
+ if (!CreateBlock(file_type, num_blocks, &address))
+ return false;
+ return stats_.Init(NULL, 0, address);
+ }
+
+ // Load the required data.
+ DCHECK(address.is_block_file());
+ size = address.num_blocks() * address.BlockSize();
+
+ if (!stats_.Init(stats_data, size, address))
+ return false;
+ if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
+ stats_.InitSizeHistogram();
+ return true;
+}
+
+void BackendImplV3::StoreStats() {
+ int size = stats_.StorageSize();
+ scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
+ Addr address;
+ size = stats_.SerializeStats(buffer->data(), size, &address);
+ DCHECK(size);
+ if (!address.is_initialized())
+ return;
+
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_WRITE_DATA);
+ work_item->set_buffer(buffer);
+ work_item->set_buffer_len(size);
+ work_item->set_address(address);
+ work_item->set_offset(0);
+ PostWorkItem(work_item);
+}
+
+void BackendImplV3::RestartCache(const CompletionCallback& callback) {
+ PrepareForRestart();
+
+ // Don't call Init() if directed by the unit test: we are simulating a failure
+ // trying to re-enable the cache.
+ if (user_flags_ & UNIT_TEST_MODE) {
+ init_ = true; // Let the destructor do proper cleanup.
+ } else {
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_RESTART);
+ work_item->set_user_callback(callback);
+ work_item->set_flags(user_flags_);
+ PostWorkItem(work_item);
+ }
+}
+
+void BackendImplV3::PrepareForRestart() {
+ if (!(user_flags_ & EVICTION_V2))
+ lru_eviction_ = true;
+
+ disabled_ = true;
+ index_.header()->crash = 0;
+ block_files_.Clear();
+ index_.Reset();
+ init_ = false;
+ restarted_ = true;
+}
+
+void BackendImplV3::CleanupCache() {
+ Trace("Backend Cleanup");
+ //eviction_.Stop();
+ timer_.reset();
+
+ if (init_) {
+ if (!(user_flags_ & NO_CLEAN_ON_EXIT)) {
+ StoreStats();
+ CloseDoomedEntries();
+ ReleaseRecentEntries();
+ UpdateDeletedEntries();
+ index_.OnBackupTimer();
+ }
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_CLEANUP);
+ PostWorkItem(work_item);
+ worker_ = NULL;
+ }
+ ptr_factory_.InvalidateWeakPtrs();
+}
+
+int BackendImplV3::NewEntry(WorkItem* work_item, EntryImplV3** entry) {
+ Addr address =
+ work_item->entries()->cells[work_item->entries()->current].GetAddress();
+
+ // The entry could have been opened since this task was posted to the cache
+ // thread, so let's check again.
+ EntryImplV3* this_entry = GetOpenEntry(address);
+ if (this_entry) {
+ // Easy job. This entry is already in memory.
+ *entry = this_entry;
+ return 0;
+ }
+
+ // Even if the entry is not in memory right now, it could have changed. Note
+ // that any state other than USED means we are either deleting this entry or
+ // it should be in memory.
+ uint32 hash =
+ work_item->entries()->cells[work_item->entries()->current].hash();
+ EntryCell cell = index_.FindEntryCell(hash, address);
+ if (!cell.IsValid() || cell.GetState() != ENTRY_USED)
+ return ERR_INVALID_ENTRY;
+
+ STRESS_DCHECK(block_files_.IsValid(address));
+
+ if (!address.SanityCheckForEntryV3()) {
+ LOG(WARNING) << "Wrong entry address.";
+ STRESS_NOTREACHED();
+ return ERR_INVALID_ADDRESS;
+ }
+
+ scoped_refptr<EntryImplV3> cache_entry;
+ if (address.file_type() == BLOCK_EVICTED) {
+ cache_entry = new EntryImplV3(this, address, work_item->key(),
+ work_item->short_entry_record().Pass());
+ } else {
+ cache_entry = new EntryImplV3(this, address, work_item->key(),
+ work_item->entry_record().Pass());
+ }
+ IncreaseNumRefs();
+ *entry = NULL;
+
+ if (!cache_entry->SanityCheck()) {
+ LOG(WARNING) << "Messed up entry found.";
+ STRESS_NOTREACHED();
+ return ERR_INVALID_ENTRY;
+ }
+
+ STRESS_DCHECK(block_files_.IsValid(
+ Addr(cache_entry->entry()->Data()->rankings_node)));
+
+ if (!cache_entry->DataSanityCheck()) {
+ // TODO: should this share a single path with the SanityCheck() failure
+ // above? Make sure the cell is deleted in the first case, and delete as
+ // much data as possible here.
+ LOG(WARNING) << "Messed up entry found.";
+ cache_entry->FixForDelete();
+ }
+
+ open_entries_[address.value()] = cache_entry;
+ index_.SetSate(cache_entry->GetHash(), address, ENTRY_OPEN);
+
+ cache_entry->BeginLogging(net_log_, false);
+ cache_entry->OnOpenEntry();
+ cache_entry.swap(entry);
+ return 0;
+}
+
+EntryImplV3* BackendImplV3::LookupOpenEntry(const EntrySet& entries,
+ const std::string key) {
+ for (size_t i = 0; i < entries.cells.size(); i++) {
+ if (entries.cells[i].GetGroup() == ENTRY_EVICTED)
+ continue;
+
+ EntryImplV3* this_entry = GetOpenEntry(entries.cells[i].GetAddress());
+ if (this_entry && this_entry->GetKey() == key)
+ return this_entry;
+ }
+ return NULL;
+}
+
+// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
+int BackendImplV3::OpenFollowingEntry(bool forward, void** iter,
+ Entry** next_entry,
+ const CompletionCallback& callback) {
+ if (disabled_)
+ return net::ERR_FAILED;
+
+ DCHECK(iter);
+
+ scoped_ptr<IndexIterator> iterator(
+ reinterpret_cast<IndexIterator*>(*iter));
+ *iter = NULL;
+
+ if (!iterator.get()) {
+ iterator.reset(new IndexIterator);
+ iterator->timestamp = index_.CalculateTimestamp(GetTime()) + 1;
+ iterator->forward = forward;
+ }
+
+ // Prepare to read the first entry from disk.
+ scoped_refptr<WorkItem> work_item = new WorkItem(WorkItem::WORK_OPEN_ENTRY);
+ work_item->set_flags(WorkItem::WORK_FOR_ITERATION);
+ work_item->set_iterator(iterator.Pass());
+ work_item->set_iter_buffer(iter);
+ work_item->set_entry_buffer(next_entry);
+
+ int rv = OpenNext(work_item);
+ if (rv == net::ERR_IO_PENDING)
+ work_item->set_user_callback(callback);
+
+ return rv;
+}
+
+bool BackendImplV3::GetMoreCells(WorkItem* work_item) {
+ DCHECK(work_item->flags() & WorkItem::WORK_FOR_ITERATION);
+ IndexIterator* iterator = work_item->iterator();
+
+ if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) {
+ int lower_limit = index_.CalculateTimestamp(work_item->initial_time());
+ if (iterator->timestamp <= lower_limit ||
+ !index_.GetNextCells(iterator)) {
+ return false;
+ }
+ return true;
+ }
+
+ return index_.GetNextCells(iterator);
+}
+
+int BackendImplV3::OpenNext(WorkItem* work_item) {
+ Trace("OpenNext work item 0x%p", work_item);
+ CellList* cells = &work_item->iterator()->cells;
+ EntrySet entries;
+ for (;;) {
+ if (cells->empty()) {
+ if (!GetMoreCells(work_item)) {
+ UpdateIterator(NULL, work_item);
+ return net::ERR_FAILED;
+ }
+ DCHECK(!cells->empty());
+ }
+
+ while (!cells->empty()) {
+ EntryCell last_cell = index_.FindEntryCell(cells->back().hash,
+ cells->back().address);
+ cells->pop_back();
+ if (!last_cell.IsValid())
+ continue;
+
+ entries.cells.push_back(last_cell);
+
+ // See if the entry is currently open.
+ EntryImplV3* this_entry = GetOpenEntry(last_cell.GetAddress());
+ if (this_entry) {
+ if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) {
+ Doom(this_entry, work_item);
+ continue;
+ } else {
+ UpdateIterator(this_entry, work_item);
+ return net::OK;
+ }
+ }
+
+ work_item->set_entries(entries);
+ PostWorkItem(work_item);
+ return net::ERR_IO_PENDING;
+ }
+ }
+}
+
+void BackendImplV3::Doom(EntryImplV3* entry, WorkItem* work_item) {
+ if (entry->GetLastUsed() >= work_item->initial_time() &&
+ entry->GetLastUsed() < work_item->end_time()) {
+ Trace("Doom 0x%p work item 0x%p", entry, work_item);
+ entry->Doom();
+ }
+ entry->Close();
+}
+
+void BackendImplV3::UpdateIterator(EntryImplV3* entry, WorkItem* work_item) {
+ int result;
+ if (entry) {
+ result = net::OK;
+ *work_item->iter_buffer() = work_item->ReleaseIterator();
+ *work_item->entry_buffer() = entry;
+ } else if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE) {
+ result = net::OK;
+ } else {
+ result = net::ERR_FAILED;
+ *work_item->iter_buffer() = NULL;
+ *work_item->entry_buffer() = entry;
+ }
+
+ if (!work_item->user_callback().is_null())
+ work_item->user_callback().Run(result);
+}
+
+void BackendImplV3::CloseDoomedEntries() {
+ // Copy the current map to make sure no new entries are deleted.
+ EntriesMap to_delete(entries_to_delete_);
+ for (EntriesMap::iterator it = to_delete.begin();
+ it != to_delete.end(); ++it) {
+ it->second->Close();
+ }
+}
+
+void BackendImplV3::ReleaseRecentEntries() {
+ for (EntriesSet::iterator it = recent_entries_.begin();
+ it != recent_entries_.end(); ++it) {
+ (*it)->Release();
+ }
+ recent_entries_.clear();
+}
+
+void BackendImplV3::UpdateDeletedEntries() {
+ for (size_t i = 0; i < deleted_entries_.size(); i++) {
+ CellInfo& cell_info = deleted_entries_[i];
+ index_.SetSate(cell_info.hash, cell_info.address, ENTRY_FREE);
+ }
+ deleted_entries_.clear();
+}
+
+void BackendImplV3::AddStorageSize(int32 bytes) {
+ index_.header()->num_bytes += bytes;
+ DCHECK_GE(index_.header()->num_bytes, 0);
+}
+
+void BackendImplV3::SubstractStorageSize(int32 bytes) {
+ index_.header()->num_bytes -= bytes;
+ DCHECK_GE(index_.header()->num_bytes, 0);
+}
+
+void BackendImplV3::IncreaseNumRefs() {
+ num_refs_++;
+ if (max_refs_ < num_refs_)
+ max_refs_ = num_refs_;
+}
+
+void BackendImplV3::DecreaseNumRefs() {
+ DCHECK(num_refs_);
+ num_refs_--;
+}
+
+void BackendImplV3::IncreaseNumEntries() {
+ index_.header()->num_entries++;
+ DCHECK_GT(index_.header()->num_entries, 0);
+}
+
+void BackendImplV3::DecreaseNumEntries() {
+ index_.header()->num_entries--;
+ if (index_.header()->num_entries < 0) {
+ NOTREACHED();
+ index_.header()->num_entries = 0;
+ }
+}
+
+void BackendImplV3::PostWorkItem(WorkItem* work_item) {
+ if (!worker_)
+ return;
+ Trace("Post task 0x%p %d flags 0x%x", work_item, work_item->type(),
+ work_item->flags());
+
+ // Long story short: we expect to see the work item back on this thread.
+ // If the task is not executed we'll leak work_item, but that should only
+ // happen at shutdown.
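+ // The reference taken below is released by OnWorkDone() once the cache
+ // thread has processed the item and run the closure back on this thread.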
+ work_item->AddRef();
+ work_item->set_closure(base::Bind(&BackendImplV3::OnWorkDone,
+ ptr_factory_.GetWeakPtr()));
+ cache_thread_->PostTask(
+ FROM_HERE,
+ base::Bind(&BackendImplV3::Worker::OnDoWork, worker_, work_item));
+}
+
+void BackendImplV3::OnWorkDone(WorkItem* work_item) {
+ Trace("Task done 0x%p %d flags 0x%x", work_item, work_item->type(),
+ work_item->flags());
+ // Balance the reference from PostWorkItem.
+ scoped_refptr<WorkItem> my_work_item;
+ my_work_item.swap(&work_item);
+
+ if (!worker_) {
+ // This may be called after CleanupForTest was called.
+ if (!my_work_item->user_callback().is_null())
+ my_work_item->user_callback().Run(my_work_item->result());
+ return;
+ }
+
+ switch (my_work_item->type()) {
+ case WorkItem::WORK_INIT: return OnInitComplete(my_work_item);
+ case WorkItem::WORK_RESTART: return OnInitComplete(my_work_item);
+ case WorkItem::WORK_GROW_INDEX: return OnGrowIndexComplete(my_work_item);
+ case WorkItem::WORK_GROW_FILES: return OnGrowFilesComplete(my_work_item);
+ case WorkItem::WORK_OPEN_ENTRY: return OnOpenEntryComplete(my_work_item);
+ default: return OnOperationComplete(my_work_item);
+ }
+}
+
+void BackendImplV3::OnInitComplete(WorkItem* work_item) {
+ int rv = work_item->result();
+ if (rv != ERR_NO_ERROR && rv != ERR_CACHE_CREATED &&
+ rv != ERR_PREVIOUS_CRASH) {
+ ReportError(rv);
+ return work_item->user_callback().Run(net::ERR_FAILED);
+ }
+
+#if defined(NET_BUILD_STRESS_CACHE)
+ // Start evictions right away.
+ up_ticks_ = kTrimDelay * 2;
+#endif
+ DCHECK(!init_);
+
+ num_refs_ = max_refs_ = 0;
+ entry_count_ = byte_count_ = 0;
+
+ if (!restarted_) {
+ buffer_bytes_ = 0;
+ trace_object_ = TraceObject::GetTraceObject();
+ // Create a recurring timer of 30 seconds (90 minutes for tests).
+ int timer_delay = user_flags_ & BASIC_UNIT_TEST ? 90 * 60 * 1000 :
+ kTimerSeconds * 1000;
+ timer_.reset(new base::RepeatingTimer<BackendImplV3>());
+ timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
+ &BackendImplV3::OnTimerTick);
+ }
+ Trace("Init");
+ init_ = true;
+
+ scoped_ptr<InitResult> result = work_item->init_result();
+ index_.Init(&result.get()->index_data);
+
+ if (index_.header()->experiment != NO_EXPERIMENT &&
+ cache_type_ != net::DISK_CACHE) {
+ // No experiment for other caches.
+ ReportError(ERR_INIT_FAILED);
+ return work_item->user_callback().Run(net::ERR_FAILED);
+ }
+
+ if (!(user_flags_ & BASIC_UNIT_TEST)) {
+ // The unit test controls directly what to test.
+ lru_eviction_ = (cache_type_ != net::DISK_CACHE);
+ }
+
+ if (!CheckIndex()) {
+ ReportError(ERR_INIT_FAILED);
+ return work_item->user_callback().Run(net::ERR_FAILED);
+ }
+ AdjustMaxCacheSize();
+
+ block_files_.Init(result->block_bitmaps);
+
+ // We want to minimize the changes to the cache for an AppCache.
+ if (cache_type() == net::APP_CACHE) {
+ DCHECK(lru_eviction_);
+ read_only_ = true;
+ } else if (cache_type() == net::SHADER_CACHE) {
+ DCHECK(lru_eviction_);
+ }
+
+ eviction_.Init(this);
+
+ int64 errors, full_dooms, partial_dooms, last_report;
+ errors = full_dooms = partial_dooms = last_report = 0;
+ if (work_item->type() == WorkItem::WORK_RESTART) {
+ errors = stats_.GetCounter(Stats::FATAL_ERROR);
+ full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
+ partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
+ last_report = stats_.GetCounter(Stats::LAST_REPORT);
+ }
+
+ if (!InitStats(result->stats_data.get())) {
+ ReportError(ERR_INIT_FAILED);
+ return work_item->user_callback().Run(net::ERR_FAILED);
+ }
+
+ disabled_ = false;
+
+#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
+ trace_object_->EnableTracing(false);
+ int sc = SelfCheck();
+ if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
+ NOTREACHED();
+ trace_object_->EnableTracing(true);
+#endif
+
+ if (work_item->type() == WorkItem::WORK_RESTART) {
+ stats_.SetCounter(Stats::FATAL_ERROR, errors);
+ stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
+ stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
+ stats_.SetCounter(Stats::LAST_REPORT, last_report);
+ }
+
+ ReportError(rv);
+ return work_item->user_callback().Run(net::OK);
+}
+
+void BackendImplV3::OnGrowIndexComplete(WorkItem* work_item) {
+ if (work_item->result() != ERR_NO_ERROR || disabled_ ||
+ (work_item->flags() & WorkItem::WORK_COMPLETE)) {
+ growing_index_ = false;
+ return;
+ }
+
+ scoped_ptr<InitResult> result = work_item->init_result();
+ index_.Init(&result.get()->index_data);
+ work_item->set_flags(WorkItem::WORK_COMPLETE);
+ PostWorkItem(work_item);
+}
+
+void BackendImplV3::OnGrowFilesComplete(WorkItem* work_item) {
+ if (work_item->result() != ERR_NO_ERROR || disabled_ ||
+ (work_item->flags() & WorkItem::WORK_COMPLETE)) {
+ growing_files_ = false;
+ return;
+ }
+
+ scoped_ptr<InitResult> result = work_item->init_result();
+ block_files_.Init(result->block_bitmaps);
+ work_item->set_flags(WorkItem::WORK_COMPLETE);
+ PostWorkItem(work_item);
+}
+
+void BackendImplV3::OnOperationComplete(WorkItem* work_item) {
+ if (work_item->result() < 0 && work_item->owner_entry()) {
+ // Make sure that there's a call to Close() after Doom().
+ work_item->owner_entry()->AddRef();
+ work_item->owner_entry()->Doom();
+ work_item->owner_entry()->Close();
+ }
+
+ if (!work_item->user_callback().is_null())
+ work_item->user_callback().Run(work_item->result());
+}
+
+void BackendImplV3::OnOpenEntryComplete(WorkItem* work_item) {
+ Trace("Open complete");
+ if (work_item->flags() & WorkItem::WORK_FOR_RESURRECT)
+ return OnOpenForResurrectComplete(work_item);
+
+ if (work_item->flags() & WorkItem::WORK_FOR_EVICT)
+ return OnEvictEntryComplete(work_item);
+
+ if (work_item->flags() & WorkItem::WORK_FOR_ITERATION)
+ return OnOpenNextComplete(work_item);
+
+ if (work_item->result() == ERR_NO_ERROR) {
+ EntryImplV3* entry;
+ int error = NewEntry(work_item, &entry);
+ if (!error) {
+ if (work_item->flags() & WorkItem::WORK_FOR_DOOM) {
+ entry->Doom();
+ entry->Close();
+ } else {
+ eviction_.OnOpenEntry(entry);
+ entry_count_++;
+ if (work_item->flags() & WorkItem::WORK_FOR_UPDATE) {
+ UpdateRank(entry, true);
+ return;
+ }
+ *work_item->entry_buffer() = entry;
+
+ Trace("Open hash 0x%x end: 0x%x", entry->GetHash(),
+ entry->GetAddress().value());
+ stats_.OnEvent(Stats::OPEN_HIT);
+ SIMPLE_STATS_COUNTER("disk_cache.hit");
+ }
+
+ work_item->user_callback().Run(net::OK);
+ return;
+ }
+ }
+
+ if (work_item->entries()->current >= work_item->entries()->cells.size() - 1) {  // TODO: is the "- 1" correct here?
+ // Not found.
+ work_item->user_callback().Run(net::ERR_FAILED);
+ return;
+ }
+
+ // TODO: post a task to delete the cell.
+
+ // Open the next entry on the list.
+ work_item->entries()->current++;
+ if (work_item->entries()->current < work_item->entries()->cells.size())
+ PostWorkItem(work_item);
+}
+
+void BackendImplV3::OnOpenForResurrectComplete(WorkItem* work_item) {
+ if (work_item->result() == ERR_NO_ERROR) {
+ EntryImplV3* deleted_entry;
+ int error = NewEntry(work_item, &deleted_entry);
+ if (!error) {
+ scoped_ptr<ShortEntryRecord> entry_record =
+ deleted_entry->GetShortEntryRecord();
+ if (!entry_record) {
+ // This is an active entry.
+ deleted_entry->Close();
+ stats_.OnEvent(Stats::CREATE_MISS);
+ Trace("create entry miss ");
+ work_item->user_callback().Run(net::ERR_FAILED);  // TODO: ERR_FAILED doesn't seem right here.
+ return;
+ }
+
+ // We are attempting to create an entry and found out that the entry was
+ // previously deleted.
+
+ stats_.OnEvent(Stats::RESURRECT_HIT);
+ Trace("Resurrect entry hit ");
+ deleted_entry->Doom();
+ deleted_entry->Close();
+
+ int rv =
+ OnCreateEntryComplete(work_item->key(), deleted_entry->GetHash(),
+ entry_record.get(), work_item->entry_buffer(),
+ work_item->user_callback());
+ DCHECK_EQ(rv, net::OK);
+ return;
+ }
+ }
+
+ if (work_item->entries()->current >= work_item->entries()->cells.size()) {
+ // Not found.
+ work_item->user_callback().Run(net::ERR_FAILED);
+ return;
+ }
+
+ // TODO: post a task to delete the cell.
+
+ // Open the next entry on the list.
+ work_item->entries()->current++;
+ if (work_item->entries()->current < work_item->entries()->cells.size())
+ PostWorkItem(work_item);
+}
+
+void BackendImplV3::OnEvictEntryComplete(WorkItem* work_item) {
+ if (work_item->result() != ERR_NO_ERROR)
+ return eviction_.OnEvictEntryComplete();
+
+ EntryCell old_cell =
+ index_.FindEntryCell(work_item->entries()->cells[0].hash(),
+ work_item->entries()->cells[0].GetAddress());
+ DCHECK(old_cell.IsValid());
+
+ if (!(work_item->flags() & WorkItem::WORK_NO_COPY)) {
+ EntryCell new_cell =
+ index_.FindEntryCell(work_item->entries()->cells[1].hash(),
+ work_item->entries()->cells[1].GetAddress());
+ DCHECK(new_cell.IsValid());
+ }
+
+ EntryImplV3* entry;
+ int error = NewEntry(work_item, &entry);
+ if (!error) {
+ entry->Doom();
+ entry->Close();
+ }
+
+ //+delete old_cell after a timer (so add to deleted entries).
+
+ eviction_.OnEvictEntryComplete();
+}
+
+void BackendImplV3::OnOpenNextComplete(WorkItem* work_item) {
+ Trace("OpenNext complete, work item 0x%p", work_item);
+ if (work_item->result() != ERR_NO_ERROR) {
+ OpenNext(work_item); // Ignore result.
+ return;
+ }
+
+ EntryImplV3* entry;
+ int error = NewEntry(work_item, &entry);
+ if (!error) {
+ if (work_item->flags() & WorkItem::WORK_FOR_DOOM_RANGE)
+ Doom(entry, work_item);
+ else
+ return UpdateIterator(entry, work_item);
+ }
+
+ // Grab another entry.
+ OpenNext(work_item); // Ignore result.
+}
+
+int BackendImplV3::OnCreateEntryComplete(const std::string& key, uint32 hash,
+ ShortEntryRecord* short_record,
+ Entry** entry,
+ const CompletionCallback& callback) {
+ // Create a new object in memory and return it to the caller.
+ Addr entry_address;
+ Trace("Create complete hash 0x%x", hash);
+ if (!block_files_.CreateBlock(BLOCK_ENTRIES, 1, &entry_address)) {
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return net::ERR_FAILED;
+ }
+
+ EntryCell cell = index_.CreateEntryCell(hash, entry_address);
+ if (!cell.IsValid()) {
+ block_files_.DeleteBlock(entry_address);
+ return net::ERR_FAILED;
+ }
+
+ scoped_refptr<EntryImplV3> cache_entry(
+ new EntryImplV3(this, cell.GetAddress(), false));
+ IncreaseNumRefs();
+
+ cache_entry->CreateEntry(key, hash, short_record);
+ cache_entry->BeginLogging(net_log_, true);
+
+ // We are not failing the operation; let's add this to the map.
+ open_entries_[cell.GetAddress().value()] = cache_entry;
+
+ IncreaseNumEntries();
+ entry_count_++;
+
+ if (short_record)
+ eviction_.OnResurrectEntry(cache_entry);
+ else
+ eviction_.OnCreateEntry(cache_entry);
+
+ stats_.OnEvent(Stats::CREATE_HIT);
+ SIMPLE_STATS_COUNTER("disk_cache.miss");
+ Trace("create entry hit ");
+ cache_entry->AddRef();
+ *entry = cache_entry.get();
+
+ if (short_record)
+ callback.Run(net::OK);
+
+ return net::OK;
+}
+
+void BackendImplV3::LogStats() {
+ StatsItems stats;
+ GetStats(&stats);
+
+ for (size_t index = 0; index < stats.size(); index++)
+ VLOG(1) << stats[index].first << ": " << stats[index].second;
+}
+
+void BackendImplV3::ReportStats() {
+ IndexHeaderV3* header = index_.header();
+ CACHE_UMA(COUNTS, "Entries", header->num_entries);
+
+ int current_size = header->num_bytes / (1024 * 1024);
+ int max_size = max_size_ / (1024 * 1024);
+
+ CACHE_UMA(COUNTS_10000, "Size", current_size);
+ CACHE_UMA(COUNTS_10000, "MaxSize", max_size);
+ if (!max_size)
+ max_size++;
+ CACHE_UMA(PERCENTAGE, "UsedSpace", current_size * 100 / max_size);
+
+ CACHE_UMA(COUNTS_10000, "AverageOpenEntries",
+ static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
+ CACHE_UMA(COUNTS_10000, "MaxOpenEntries",
+ static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
+ stats_.SetCounter(Stats::MAX_ENTRIES, 0);
+
+ CACHE_UMA(COUNTS_10000, "TotalFatalErrors",
+ static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
+ CACHE_UMA(COUNTS_10000, "TotalDoomCache",
+ static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
+ CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries",
+ static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
+ stats_.SetCounter(Stats::FATAL_ERROR, 0);
+ stats_.SetCounter(Stats::DOOM_CACHE, 0);
+ stats_.SetCounter(Stats::DOOM_RECENT, 0);
+
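+ // The Stats::TIMER counter advances once per 30-second tick, so dividing by
+ // 120 converts it to hours.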
+ int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
+ if (!(header->flags & CACHE_EVICTED)) {
+ CACHE_UMA(HOURS, "TotalTimeNotFull", static_cast<int>(total_hours));
+ return;
+ }
+
+ // This is an up to date client that will report FirstEviction() data. After
+ // that event, start reporting this:
+
+ CACHE_UMA(HOURS, "TotalTime", static_cast<int>(total_hours));
+
+ int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
+ stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));
+
+ // We may see users with no use_hours at this point if this is the first time
+ // we are running this code.
+ if (use_hours)
+ use_hours = total_hours - use_hours;
+
+ if (!use_hours || !GetEntryCount() || !header->num_bytes)
+ return;
+
+ CACHE_UMA(HOURS, "UseTime", static_cast<int>(use_hours));
+
+ int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
+ CACHE_UMA(COUNTS, "TrimRate", static_cast<int>(trim_rate));
+
+ int avg_size = header->num_bytes / GetEntryCount();
+ CACHE_UMA(COUNTS, "EntrySize", avg_size);
+ CACHE_UMA(COUNTS, "EntriesFull", header->num_entries);
+
+ int large_entries_bytes = stats_.GetLargeEntriesSize();
+ int large_ratio = large_entries_bytes * 100 / header->num_bytes;
+ CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", large_ratio);
+
+ if (!lru_eviction_) {
+ CACHE_UMA(PERCENTAGE, "ResurrectRatio", stats_.GetResurrectRatio());
+ CACHE_UMA(PERCENTAGE, "NoUseRatio",
+ header->num_no_use_entries * 100 / header->num_entries);
+ CACHE_UMA(PERCENTAGE, "LowUseRatio",
+ header->num_low_use_entries * 100 / header->num_entries);
+ CACHE_UMA(PERCENTAGE, "HighUseRatio",
+ header->num_high_use_entries * 100 / header->num_entries);
+ CACHE_UMA(PERCENTAGE, "DeletedRatio",
+ header->num_evicted_entries * 100 / header->num_entries);
+ }
+
+ stats_.ResetRatios();
+ stats_.SetCounter(Stats::TRIM_ENTRY, 0);
+
+ if (cache_type_ == net::DISK_CACHE)
+ block_files_.ReportStats();
+}
+
+void BackendImplV3::ReportError(int error) {
+ STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
+ error == ERR_CACHE_CREATED);
+
+ // We transmit positive numbers, instead of direct error codes.
+ DCHECK_LE(error, 0);
+ CACHE_UMA(CACHE_ERROR, "Error", error * -1);
+}
+
+bool BackendImplV3::CheckIndex() {
+ if (index_.header()->flags & CACHE_EVICTION_2)
+ lru_eviction_ = false;
+
+ /*
+ if (!index_.header()->table_len) {
+ LOG(ERROR) << "Invalid table size";
+ return false;
+ }
+
+ if (current_size < GetIndexSize(index_.header()->table_len) ||
+ index_.header()->table_len & (kBaseTableLen - 1)) {
+ LOG(ERROR) << "Corrupt Index file";
+ return false;
+ }
+
+ AdjustMaxCacheSize(index_.header()->table_len);
+
+#if !defined(NET_BUILD_STRESS_CACHE)
+ if (index_.header()->num_bytes < 0 ||
+ (max_size_ < kint32max - kDefaultCacheSize &&
+ index_.header()->num_bytes > max_size_ + kDefaultCacheSize)) {
+ LOG(ERROR) << "Invalid cache (current) size";
+ return false;
+ }
+#endif
+
+ if (index_.header()->num_entries < 0) {
+ LOG(ERROR) << "Invalid number of entries";
+ return false;
+ }
+
+ if (!mask_)
+ mask_ = index_.header()->table_len - 1;
+
+ // Load the table into memory with a single read.
+ scoped_array<char> buf(new char[current_size]);
+ return index_->Read(buf.get(), current_size, 0);
+ */
+
+ // Make sure things look fine; maybe scan the whole thing if not.
+ return true;
+}
+
+int BackendImplV3::CheckAllEntries() {
+ /*
+ int num_dirty = 0;
+ int num_entries = 0;
+ DCHECK(mask_ < kuint32max);
+ for (unsigned int i = 0; i <= mask_; i++) {
+ Addr address(data_->table[i]);
+ if (!address.is_initialized())
+ continue;
+ for (;;) {
+ EntryImplV3* tmp;
+ int ret = NewEntry(address, &tmp);
+ if (ret) {
+ STRESS_NOTREACHED();
+ return ret;
+ }
+ scoped_refptr<EntryImplV3> cache_entry;
+ cache_entry.swap(&tmp);
+
+ if (cache_entry->dirty())
+ num_dirty++;
+ else if (CheckEntry(cache_entry.get()))
+ num_entries++;
+ else
+ return ERR_INVALID_ENTRY;
+
+ DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
+ address.set_value(cache_entry->GetNextAddress());
+ if (!address.is_initialized())
+ break;
+ }
+ }
+
+ Trace("CheckAllEntries End");
+ if (num_entries + num_dirty != index_.header()->num_entries) {
+ LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
+ " " << index_.header()->num_entries;
+ DCHECK_LT(num_entries, index_.header()->num_entries);
+ return ERR_NUM_ENTRIES_MISMATCH;
+ }
+
+ return num_dirty;
+ */
+ return 0;
+}
+
+bool BackendImplV3::CheckEntry(EntryImplV3* cache_entry) {
+ /*
+ bool ok = block_files_.IsValid(cache_entry->entry()->address());
+ ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
+ EntryStore* data = cache_entry->entry()->Data();
+ for (size_t i = 0; i < arraysize(data->data_addr); i++) {
+ if (data->data_addr[i]) {
+ Addr address(data->data_addr[i]);
+ if (address.is_block_file())
+ ok = ok && block_files_.IsValid(address);
+ }
+ }
+
+ return ok && cache_entry->rankings()->VerifyHash();
+ */
+ return true;
+}
+
+int BackendImplV3::MaxBuffersSize() {
+ static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
+ static bool done = false;
+
+ if (!done) {
+ const int kMaxBuffersSize = 30 * 1024 * 1024;
+
+ // We want to use up to 2% of the computer's memory.
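+ // For example, on a machine with 8 GB of RAM, 2% would be ~160 MB, so the
+ // 30 MB cap applies; with 1 GB of RAM the limit is roughly 20 MB.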
+ total_memory = total_memory * 2 / 100;
+ if (total_memory > kMaxBuffersSize || total_memory <= 0)
+ total_memory = kMaxBuffersSize;
+
+ done = true;
+ }
+
+ return static_cast<int>(total_memory);
+}
+
+} // namespace disk_cache
Property changes on: net\disk_cache\v3\backend_impl_v3.cc
___________________________________________________________________
Added: svn:eol-style
+ LF