Chromium Code Reviews
Unified Diff: net/disk_cache/backend_impl.cc

Issue 15203004: Disk cache: Reference CL for the implementation of file format version 3. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: IndexTable review (created 7 years, 1 month ago)
Index: net/disk_cache/backend_impl.cc
===================================================================
--- net/disk_cache/backend_impl.cc (revision 199883)
+++ net/disk_cache/backend_impl.cc (working copy)
@@ -22,6 +22,7 @@
#include "base/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/disk_format.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/errors.h"
#include "net/disk_cache/experiments.h"
@@ -284,7 +285,7 @@
bool previous_crash = (data_->header.crash != 0);
data_->header.crash = 1;
- if (!block_files_.Init(create_files))
+ if (!block_files_.Init(create_files, kFirstAdditionalBlockFile))
return net::ERR_FAILED;
// We want to minimize the changes to cache for an AppCache.
@@ -299,7 +300,7 @@
// stats_ and rankings_ may end up calling back to us so we better be enabled.
disabled_ = false;
- if (!stats_.Init(this, &data_->header.stats))
+ if (!InitStats())
return net::ERR_FAILED;
disabled_ = !rankings_.Init(this, new_eviction_);
@@ -329,7 +330,7 @@
timer_.reset();
if (init_) {
- stats_.Store();
+ StoreStats();
if (data_)
data_->header.crash = 0;
@@ -607,7 +608,7 @@
cache_entry->BeginLogging(net_log_, true);
// We are not failing the operation; let's add this to the map.
- open_entries_[entry_address.value()] = cache_entry;
+ open_entries_[entry_address.value()] = cache_entry.get();
// Save the entry.
cache_entry->entry()->Store();
@@ -623,7 +624,7 @@
}
// Link this entry through the lists.
- eviction_.OnCreateEntry(cache_entry);
+ eviction_.OnCreateEntry(cache_entry.get());
CACHE_UMA(AGE_MS, "CreateTime", 0, start);
stats_.OnEvent(Stats::CREATE_HIT);
@@ -1000,8 +1001,8 @@
disabled_ = true;
if (!num_refs_)
- MessageLoop::current()->PostTask(FROM_HERE,
- base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}
void BackendImpl::ReportError(int error) {
@@ -1066,7 +1067,7 @@
// Save stats to disk at 5 min intervals.
if (time % 10 == 0)
- stats_.Store();
+ StoreStats();
}
void BackendImpl::IncrementIoCount() {
@@ -1247,6 +1248,10 @@
item.second = base::StringPrintf("%d", data_->header.num_bytes);
stats->push_back(item);
+ item.first = "Cache type";
+ item.second = "Blockfile Cache";
+ stats->push_back(item);
+
stats_.GetItems(stats);
}
@@ -1294,7 +1299,7 @@
bool ret = true;
if (*file_created)
- ret = CreateBackingStore(file);
+ ret = CreateBackingStore(file.get());
file = NULL;
if (!ret)
@@ -1352,11 +1357,68 @@
max_size_= current_max_size;
}
+bool BackendImpl::InitStats() {
+ Addr address(data_->header.stats);
+ int size = stats_.StorageSize();
+
+ if (!address.is_initialized()) {
+ FileType file_type = Addr::RequiredFileType(size);
+ DCHECK_NE(file_type, EXTERNAL);
+ int num_blocks = Addr::RequiredBlocks(size, file_type);
+
+ if (!CreateBlock(file_type, num_blocks, &address))
+ return false;
+
+ data_->header.stats = address.value();
+ return stats_.Init(NULL, 0, address);
+ }
+
+ if (!address.is_block_file()) {
+ NOTREACHED();
+ return false;
+ }
+
+ // Load the required data.
+ size = address.num_blocks() * address.BlockSize();
+ MappedFile* file = File(address);
+ if (!file)
+ return false;
+
+ scoped_ptr<char[]> data(new char[size]);
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ if (!file->Read(data.get(), size, offset))
+ return false;
+
+ if (!stats_.Init(data.get(), size, address))
+ return false;
+ if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
+ stats_.InitSizeHistogram();
+ return true;
+}
+
+void BackendImpl::StoreStats() {
+ int size = stats_.StorageSize();
+ scoped_ptr<char[]> data(new char[size]);
+ Addr address;
+ size = stats_.SerializeStats(data.get(), size, &address);
+ DCHECK(size);
+ if (!address.is_initialized())
+ return;
+
+ MappedFile* file = File(address);
+ if (!file)
+ return;
+
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ file->Write(data.get(), size, offset); // ignore result.
+}
+
void BackendImpl::RestartCache(bool failure) {
int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
- int64 ga_evictions = stats_.GetCounter(Stats::GAJS_EVICTED);
int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
PrepareForRestart();
@@ -1376,7 +1438,6 @@
stats_.SetCounter(Stats::FATAL_ERROR, errors);
stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
- stats_.SetCounter(Stats::GAJS_EVICTED, ga_evictions);
stats_.SetCounter(Stats::LAST_REPORT, last_report);
}
}
@@ -1412,7 +1473,7 @@
STRESS_DCHECK(block_files_.IsValid(address));
- if (!address.SanityCheckForEntry()) {
+ if (!address.SanityCheckForEntryV2()) {
LOG(WARNING) << "Wrong entry address.";
STRESS_NOTREACHED();
return ERR_INVALID_ADDRESS;
@@ -1470,7 +1531,7 @@
address.value());
}
- open_entries_[address.value()] = cache_entry;
+ open_entries_[address.value()] = cache_entry.get();
cache_entry->BeginLogging(net_log_, false);
cache_entry.swap(entry);
@@ -1529,7 +1590,7 @@
if (!error) {
// It is important to call DestroyInvalidEntry after removing this
// entry from the table.
- DestroyInvalidEntry(cache_entry);
+ DestroyInvalidEntry(cache_entry.get());
cache_entry = NULL;
} else {
Trace("NewEntry failed on MatchEntry 0x%x", address.value());
@@ -1779,8 +1840,8 @@
num_refs_--;
if (!num_refs_ && disabled_)
- MessageLoop::current()->PostTask(FROM_HERE,
- base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
+ base::MessageLoop::current()->PostTask(
+ FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}
void BackendImpl::IncreaseNumEntries() {
@@ -1834,12 +1895,9 @@
static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
- CACHE_UMA(COUNTS_10000, "TotalEvictionsGaJs", 0,
- static_cast<int>(stats_.GetCounter(Stats::GAJS_EVICTED)));
stats_.SetCounter(Stats::FATAL_ERROR, 0);
stats_.SetCounter(Stats::DOOM_CACHE, 0);
stats_.SetCounter(Stats::DOOM_RECENT, 0);
- stats_.SetCounter(Stats::GAJS_EVICTED, 0);
int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
if (!data_->header.create_time || !data_->header.lru.filled) {
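Note on the new InitStats()/StoreStats() pair added in this patch: both locate the serialized stats record inside a block file by computing the byte offset start_block() * BlockSize() + kBlockHeaderSize and then reading or writing through the MappedFile returned by File(address). The standalone sketch below only illustrates that offset arithmetic; the constant values are placeholders chosen for illustration, not the real ones from disk_format.h or the Addr implementation.

// Standalone sketch of the offset arithmetic used by InitStats()/StoreStats().
// The constants here are illustrative placeholders; the real kBlockHeaderSize
// and per-file block sizes come from the disk cache format headers.
#include <cstddef>
#include <cstdio>

namespace {

const size_t kSketchBlockHeaderSize = 8192;  // placeholder, not the real value
const size_t kSketchBlockSize = 256;         // placeholder block size

// Mirrors: address.start_block() * address.BlockSize() + kBlockHeaderSize
size_t StatsRecordOffset(size_t start_block) {
  return start_block * kSketchBlockSize + kSketchBlockHeaderSize;
}

}  // namespace

int main() {
  // Both the Read() in InitStats() and the Write() in StoreStats() use the
  // same offset, so the record round-trips to the same place in the file.
  std::printf("stats record offset for block 3: %zu\n", StatsRecordOffset(3));
  return 0;
}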