Index: net/disk_cache/blockfile/backend_impl.cc
diff --git a/net/disk_cache/blockfile/backend_impl.cc b/net/disk_cache/blockfile/backend_impl.cc
index 49ff0c783ce8da9aff025526c5440382ca047e97..e245e46b6858b570fc0b41124d2f0b678d5578ed 100644
--- a/net/disk_cache/blockfile/backend_impl.cc
+++ b/net/disk_cache/blockfile/backend_impl.cc
@@ -85,7 +85,7 @@ bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
   }

   if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
-          "ExperimentControl") {
+      "ExperimentControl") {
     if (cache_created) {
       header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
       return true;
@@ -290,7 +290,9 @@ int BackendImpl::SyncInit() {
     // Create a recurrent timer of 30 secs.
     int timer_delay = unit_test_ ? 1000 : 30000;
     timer_.reset(new base::RepeatingTimer<BackendImpl>());
-    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
+    timer_->Start(FROM_HERE,
+                  TimeDelta::FromMilliseconds(timer_delay),
+                  this,
                   &BackendImpl::OnStatsTimer);
   }
@@ -324,7 +326,8 @@ void BackendImpl::CleanupCache() {
 // ------------------------------------------------------------------------

-int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
+int BackendImpl::OpenPrevEntry(void** iter,
+                               Entry** prev_entry,
                                const CompletionCallback& callback) {
   DCHECK(!callback.is_null());
   background_queue_.OpenPrevEntry(iter, prev_entry, callback);
@@ -393,8 +396,7 @@ int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
     node = next;
     next = OpenNextEntryImpl(&iter);

-    if (node->GetLastUsed() >= initial_time &&
-        node->GetLastUsed() < end_time) {
+    if (node->GetLastUsed() >= initial_time && node->GetLastUsed() < end_time) {
       node->DoomImpl();
     } else if (node->GetLastUsed() < initial_time) {
       if (next)
@@ -498,7 +500,8 @@ EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
   eviction_.OnOpenEntry(cache_entry);
   entry_count_++;

-  Trace("Open hash 0x%x end: 0x%x", hash,
+  Trace("Open hash 0x%x end: 0x%x",
+        hash,
         cache_entry->entry()->address().value());
   CACHE_UMA(AGE_MS, "OpenTime", 0, start);
   CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
@@ -694,8 +697,9 @@ bool BackendImpl::CreateExternalFile(Addr* address) {
   return true;
 }

-bool BackendImpl::CreateBlock(FileType block_type, int block_count,
-                              Addr* block_address) {
+bool BackendImpl::CreateBlock(FileType block_type,
+                              int block_count,
+                              Addr* block_address) {
   return block_files_.CreateBlock(block_type, block_count, block_address);
 }
@@ -891,8 +895,8 @@ bool BackendImpl::IsLoaded() const {
 std::string BackendImpl::HistogramName(const char* name, int experiment) const {
   if (!experiment)
     return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
-  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
-                            name, experiment);
+  return base::StringPrintf(
+      "DiskCache.%d.%s_%d", cache_type_, name, experiment);
 }

 base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
@@ -930,9 +934,13 @@ void BackendImpl::FirstEviction() {
   if (!use_time)
     use_time = 1;

-  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
+  CACHE_UMA(COUNTS_10000,
+            "FirstEntryAccessRate",
+            0,
             static_cast<int>(data_->header.num_entries / use_time));
-  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
+  CACHE_UMA(COUNTS,
+            "FirstByteIORate",
+            0,
             static_cast<int>((data_->header.num_bytes / 1024) / use_time));

   int avg_size = data_->header.num_bytes / GetEntryCount();
@@ -944,11 +952,17 @@ void BackendImpl::FirstEviction() {

   if (new_eviction_) {
     CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
-    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
+    CACHE_UMA(PERCENTAGE,
+              "FirstNoUseRatio",
+              0,
               data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
-    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
+    CACHE_UMA(PERCENTAGE,
+              "FirstLowUseRatio",
+              0,
               data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
-    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
+    CACHE_UMA(PERCENTAGE,
+              "FirstHighUseRatio",
+              0,
               data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
   }
@@ -1138,8 +1152,8 @@ int32 BackendImpl::GetEntryCount() const {
   if (!index_.get() || disabled_)
     return 0;
   // num_entries includes entries already evicted.
-  int32 not_deleted = data_->header.num_entries -
-                      data_->header.lru.sizes[Rankings::DELETED];
+  int32 not_deleted =
+      data_->header.num_entries - data_->header.lru.sizes[Rankings::DELETED];

   if (not_deleted < 0) {
     NOTREACHED();
@@ -1149,14 +1163,16 @@ int32 BackendImpl::GetEntryCount() const {
   return not_deleted;
 }

-int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
+int BackendImpl::OpenEntry(const std::string& key,
+                           Entry** entry,
                            const CompletionCallback& callback) {
   DCHECK(!callback.is_null());
   background_queue_.OpenEntry(key, entry, callback);
   return net::ERR_IO_PENDING;
 }

-int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
+int BackendImpl::CreateEntry(const std::string& key,
+                             Entry** entry,
                              const CompletionCallback& callback) {
   DCHECK(!callback.is_null());
   background_queue_.CreateEntry(key, entry, callback);
@@ -1191,7 +1207,8 @@ int BackendImpl::DoomEntriesSince(const base::Time initial_time,
   return net::ERR_IO_PENDING;
 }

-int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
+int BackendImpl::OpenNextEntry(void** iter,
+                               Entry** next_entry,
                                const CompletionCallback& callback) {
   DCHECK(!callback.is_null());
   background_queue_.OpenNextEntry(iter, next_entry, callback);
@@ -1325,7 +1342,7 @@ void BackendImpl::AdjustMaxCacheSize(int table_len) {
   // If we already have a table, adjust the size to it.
   int current_max_size = MaxStorageSizeForTable(table_len);
   if (max_size_ > current_max_size)
-    max_size_= current_max_size;
+    max_size_ = current_max_size;
 }

 bool BackendImpl::InitStats() {
@@ -1356,8 +1373,8 @@ bool BackendImpl::InitStats() {
     return false;

   scoped_ptr<char[]> data(new char[size]);
-  size_t offset = address.start_block() * address.BlockSize() +
-                  kBlockHeaderSize;
+  size_t offset =
+      address.start_block() * address.BlockSize() + kBlockHeaderSize;
   if (!file->Read(data.get(), size, offset))
     return false;
@@ -1381,8 +1398,8 @@ void BackendImpl::StoreStats() {
   if (!file)
     return;

-  size_t offset = address.start_block() * address.BlockSize() +
-                  kBlockHeaderSize;
+  size_t offset =
+      address.start_block() * address.BlockSize() + kBlockHeaderSize;
   file->Write(data.get(), size, offset);  // ignore result.
 }
@@ -1469,8 +1486,8 @@ int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
     return ERR_INVALID_ENTRY;
   }

-  STRESS_DCHECK(block_files_.IsValid(
-      Addr(cache_entry->entry()->Data()->rankings_node)));
+  STRESS_DCHECK(
+      block_files_.IsValid(Addr(cache_entry->entry()->Data()->rankings_node)));

   if (!cache_entry->LoadNodeAddress())
     return ERR_READ_FAILURE;
@@ -1498,7 +1515,8 @@ int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
   cache_entry->SetDirtyFlag(GetCurrentEntryId());
   if (cache_entry->dirty()) {
-    Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
+    Trace("Dirty entry 0x%p 0x%x",
+          reinterpret_cast<void*>(cache_entry.get()),
           address.value());
   }
@@ -1509,8 +1527,10 @@ int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
   return 0;
 }

-EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
-                                   bool find_parent, Addr entry_addr,
+EntryImpl* BackendImpl::MatchEntry(const std::string& key,
+                                   uint32 hash,
+                                   bool find_parent,
+                                   Addr entry_addr,
                                    bool* match_error) {
   Addr address(data_->table[hash & mask_]);
   scoped_refptr<EntryImpl> cache_entry, parent_entry;
@@ -1555,7 +1575,9 @@ EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
         data_->table[hash & mask_] = child.value();
       }

-      Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
+      Trace("MatchEntry dirty %d 0x%x 0x%x",
+            find_parent,
+            entry_addr.value(),
             address.value());

       if (!error) {
@@ -1631,8 +1653,8 @@ EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
     // Get an entry from each list.
     for (int i = 0; i < kListsToSearch; i++) {
       EntryImpl* temp = NULL;
-      ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
-                                        &iterator->nodes[i], &temp);
+      ret |= OpenFollowingEntryFromList(
+          forward, static_cast<Rankings::List>(i), &iterator->nodes[i], &temp);
       entries[i].swap(&temp);  // The entry was already addref'd.
     }
     if (!ret)
@@ -1643,8 +1665,8 @@ EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
     for (int i = 0; i < kListsToSearch; i++) {
       EntryImpl* temp = NULL;
       if (iterator->list == i) {
-        OpenFollowingEntryFromList(forward, iterator->list,
-                                   &iterator->nodes[i], &temp);
+        OpenFollowingEntryFromList(
+            forward, iterator->list, &iterator->nodes[i], &temp);
       } else {
         temp = GetEnumeratedEntry(iterator->nodes[i],
                                   static_cast<Rankings::List>(i));
@@ -1689,7 +1711,8 @@ EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
   return next_entry;
 }

-bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
+bool BackendImpl::OpenFollowingEntryFromList(bool forward,
+                                             Rankings::List list,
                                              CacheRankingsBlock** from_entry,
                                              EntryImpl** next_entry) {
   if (disabled_)
@@ -1699,9 +1722,9 @@ bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
     return false;

   Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
-  CacheRankingsBlock* next_block = forward ?
-      rankings_.GetNext(rankings.get(), list) :
-      rankings_.GetPrev(rankings.get(), list);
+  CacheRankingsBlock* next_block =
+      forward ? rankings_.GetNext(rankings.get(), list)
+              : rankings_.GetPrev(rankings.get(), list);
   Rankings::ScopedRankingsBlock next(&rankings_, next_block);
   *from_entry = NULL;
@@ -1854,24 +1877,34 @@ void BackendImpl::ReportStats() {
     max_size++;
   CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

-  CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
+  CACHE_UMA(COUNTS_10000,
+            "AverageOpenEntries2",
+            0,
             static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
-  CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
+  CACHE_UMA(COUNTS_10000,
+            "MaxOpenEntries2",
+            0,
             static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
   stats_.SetCounter(Stats::MAX_ENTRIES, 0);

-  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
+  CACHE_UMA(COUNTS_10000,
+            "TotalFatalErrors",
+            0,
             static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
-  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
+  CACHE_UMA(COUNTS_10000,
+            "TotalDoomCache",
+            0,
             static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
-  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
+  CACHE_UMA(COUNTS_10000,
+            "TotalDoomRecentEntries",
+            0,
             static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
   stats_.SetCounter(Stats::FATAL_ERROR, 0);
   stats_.SetCounter(Stats::DOOM_CACHE, 0);
   stats_.SetCounter(Stats::DOOM_RECENT, 0);

-  int age = (Time::Now() -
-             Time::FromInternalValue(data_->header.create_time)).InHours();
+  int age = (Time::Now() - Time::FromInternalValue(data_->header.create_time))
+                .InHours();
   if (age)
     CACHE_UMA(HOURS, "FilesAge", 0, age);
@@ -1921,7 +1954,9 @@ void BackendImpl::ReportStats() {
   CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
   CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);

-  CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
+  CACHE_UMA(PERCENTAGE,
+            "IndexLoad",
+            0,
             data_->header.num_entries * 100 / (mask_ + 1));

   int large_entries_bytes = stats_.GetLargeEntriesSize();
@@ -1930,13 +1965,21 @@ void BackendImpl::ReportStats() {

   if (new_eviction_) {
     CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
-    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
+    CACHE_UMA(PERCENTAGE,
+              "NoUseRatio",
+              0,
               data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
-    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
+    CACHE_UMA(PERCENTAGE,
+              "LowUseRatio",
+              0,
               data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
-    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
+    CACHE_UMA(PERCENTAGE,
+              "HighUseRatio",
+              0,
               data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
-    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
+    CACHE_UMA(PERCENTAGE,
+              "DeletedRatio",
+              0,
               data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
   }
@@ -2051,8 +2094,8 @@ int BackendImpl::CheckAllEntries() {
   Trace("CheckAllEntries End");
   if (num_entries + num_dirty != data_->header.num_entries) {
-    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
-        " " << data_->header.num_entries;
+    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty << " "
+               << data_->header.num_entries;
     DCHECK_LT(num_entries, data_->header.num_entries);
     return ERR_NUM_ENTRIES_MISMATCH;
   }