Index: net/disk_cache/block_files.cc |
=================================================================== |
--- net/disk_cache/block_files.cc (revision 232523) |
+++ net/disk_cache/block_files.cc (working copy) |
@@ -5,7 +5,7 @@ |
#include "net/disk_cache/block_files.h" |
#include "base/atomicops.h" |
-#include "base/files/file_path.h" |
+#include "base/file_util.h" |
#include "base/metrics/histogram.h" |
#include "base/strings/string_util.h" |
#include "base/strings/stringprintf.h" |
@@ -19,7 +19,8 @@ |
namespace { |
-const char* kBlockName = "data_"; |
+const char kBlockName[] = "data_"; |
+const char kBlockDataPostfix[] = "_d"; |
// This array is used to perform a fast lookup of the nibble bit pattern to the |
// type of entry that can be stored there (number of consecutive blocks). |
@@ -152,9 +153,10 @@ |
// Note that this is a simplified version of DeleteMapBlock(). |
bool BlockHeader::UsedMapBlock(int index, int size) { |
- if (size < 0 || size > kMaxNumBlocks) |
+ if (size < 0 || size > kMaxNumBlocks) { |
+ NOTREACHED(); |
return false; |
- |
+ } |
int byte_index = index / 8; |
uint8* byte_map = reinterpret_cast<uint8*>(header_->allocation_map); |
uint8 map_block = byte_map[byte_index]; |
@@ -250,10 +252,6 @@ |
return header_->next_file; |
} |
-int BlockHeader::Size() const { |
- return static_cast<int>(sizeof(*header_)); |
-} |
- |
BlockFileHeader* BlockHeader::Header() { |
return header_; |
} |
@@ -261,7 +259,11 @@ |
// ------------------------------------------------------------------------ |
BlockFiles::BlockFiles(const base::FilePath& path) |
- : init_(false), zero_buffer_(NULL), path_(path) { |
+ : init_(false), |
+ small_steps_(false), |
+ data_offset_(0), |
+ zero_buffer_(NULL), |
+ path_(path) { |
} |
BlockFiles::~BlockFiles() { |
@@ -270,15 +272,17 @@ |
CloseFiles(); |
} |
-bool BlockFiles::Init(bool create_files) { |
+bool BlockFiles::Init(bool create_files, int num_files) { |
DCHECK(!init_); |
if (init_) |
return false; |
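+  // Requesting more than the default number of files selects the new format, |
+  // where block data lives in separate files and has no header in front. |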
+ data_offset_ = num_files > kFirstAdditionalBlockFile ? 0 : kBlockHeaderSize; |
thread_checker_.reset(new base::ThreadChecker); |
- block_files_.resize(kFirstAdditionalBlockFile); |
- for (int i = 0; i < kFirstAdditionalBlockFile; i++) { |
+ block_headers_.resize(num_files); |
+ block_data_.resize(num_files); |
+ for (int i = 0; i < num_files; i++) { |
if (create_files) |
if (!CreateBlockFile(i, static_cast<FileType>(i + 1), true)) |
return false; |
@@ -289,29 +293,37 @@ |
// Walk this chain of files removing empty ones. |
if (!RemoveEmptyFile(static_cast<FileType>(i + 1))) |
return false; |
+ |
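+    // For the new format, make sure there is a reserve of free blocks. |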
+ if (!data_offset_ && !PreallocateSpace(static_cast<FileType>(i + 1))) |
+ return false; |
} |
init_ = true; |
return true; |
} |
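+// Gives the caller access to the header (and thus the allocation bitmap) of |
+// each of the first |num_files| block files. |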
+void BlockFiles::GetBitmaps(int num_files, BlockFilesBitmaps* bitmaps) { |
+ bitmaps->clear(); |
+ bitmaps->resize(num_files); |
+ |
+ for (int i = 0; i < num_files; i++) { |
+ // Only the block_file argument is relevant for what we want. |
+ Addr address(BLOCK_256, 1, i, 0); |
+ BlockHeader header(GetFileHeader(address)); |
+ (*bitmaps)[i] = header; |
+ } |
+} |
+ |
MappedFile* BlockFiles::GetFile(Addr address) { |
- DCHECK(thread_checker_->CalledOnValidThread()); |
- DCHECK_GE(block_files_.size(), |
- static_cast<size_t>(kFirstAdditionalBlockFile)); |
- DCHECK(address.is_block_file() || !address.is_initialized()); |
- if (!address.is_initialized()) |
+ int file_index = GetFileIndex(address); |
+ if (file_index < 0) |
return NULL; |
- int file_index = address.FileNumber(); |
- if (static_cast<unsigned int>(file_index) >= block_files_.size() || |
- !block_files_[file_index]) { |
- // We need to open the file |
- if (!OpenBlockFile(file_index)) |
- return NULL; |
- } |
- DCHECK_GE(block_files_.size(), static_cast<unsigned int>(file_index)); |
- return block_files_[file_index]; |
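+  // With the old format the header and the data share the same mapped file. |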
+ if (data_offset_) |
+ return block_headers_[file_index]; |
+ |
+ DCHECK(block_data_.size() >= static_cast<unsigned int>(file_index)); |
+ return block_data_[file_index]; |
} |
bool BlockFiles::CreateBlock(FileType block_type, int block_count, |
@@ -360,12 +372,11 @@ |
Trace("DeleteBlock 0x%x", address.value()); |
size_t size = address.BlockSize() * address.num_blocks(); |
- size_t offset = address.start_block() * address.BlockSize() + |
- kBlockHeaderSize; |
+ size_t offset = address.start_block() * address.BlockSize() + data_offset_; |
if (deep) |
file->Write(zero_buffer_, size, offset); |
- BlockHeader file_header(file); |
+ BlockHeader file_header(GetFileHeader(address)); |
file_header.DeleteMapBlock(address.start_block(), address.num_blocks()); |
file->Flush(); |
@@ -385,13 +396,20 @@ |
DCHECK(thread_checker_->CalledOnValidThread()); |
} |
init_ = false; |
- for (unsigned int i = 0; i < block_files_.size(); i++) { |
- if (block_files_[i]) { |
- block_files_[i]->Release(); |
- block_files_[i] = NULL; |
+ for (unsigned int i = 0; i < block_headers_.size(); i++) { |
+ if (block_headers_[i]) { |
+ block_headers_[i]->Release(); |
+ block_headers_[i] = NULL; |
} |
} |
- block_files_.clear(); |
+ for (unsigned int i = 0; i < block_data_.size(); i++) { |
+ if (block_data_[i]) { |
+ block_data_[i]->Release(); |
+ block_data_[i] = NULL; |
+ } |
+ } |
+ block_headers_.clear(); |
+ block_data_.clear(); |
} |
void BlockFiles::ReportStats() { |
@@ -419,31 +437,19 @@ |
if (!address.is_initialized() || address.is_separate_file()) |
return false; |
- MappedFile* file = GetFile(address); |
+ MappedFile* file = GetFileHeader(address); |
if (!file) |
return false; |
BlockHeader header(file); |
bool rv = header.UsedMapBlock(address.start_block(), address.num_blocks()); |
DCHECK(rv); |
- |
- static bool read_contents = false; |
- if (read_contents) { |
- scoped_ptr<char[]> buffer; |
- buffer.reset(new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4]); |
- size_t size = address.BlockSize() * address.num_blocks(); |
- size_t offset = address.start_block() * address.BlockSize() + |
- kBlockHeaderSize; |
- bool ok = file->Read(buffer.get(), size, offset); |
- DCHECK(ok); |
- } |
- |
return rv; |
#endif |
} |
bool BlockFiles::CreateBlockFile(int index, FileType file_type, bool force) { |
- base::FilePath name = Name(index); |
+ base::FilePath name = HeaderName(index); |
int flags = |
force ? base::PLATFORM_FILE_CREATE_ALWAYS : base::PLATFORM_FILE_CREATE; |
flags |= base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_EXCLUSIVE_WRITE; |
@@ -456,22 +462,34 @@ |
BlockFileHeader header; |
memset(&header, 0, sizeof(header)); |
header.magic = kBlockMagic; |
- header.version = kBlockVersion2; |
+ header.version = data_offset_ ? kBlockVersion2 : kBlockCurrentVersion; |
header.entry_size = Addr::BlockSizeForFileType(file_type); |
header.this_file = static_cast<int16>(index); |
DCHECK(index <= kint16max && index >= 0); |
- return file->Write(&header, sizeof(header), 0); |
+ if (!file->Write(&header, sizeof(header), 0)) |
+ return false; |
+ |
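+  // The old format keeps the data in this same file, so we are done. |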
+ if (header.version == kBlockVersion2) |
+ return true; |
+ |
+ // Now create another file for the data itself. |
+ name = DataName(index); |
+ file = new File(base::CreatePlatformFile(name, flags, NULL, NULL)); |
+ return file->IsValid(); |
} |
bool BlockFiles::OpenBlockFile(int index) { |
- if (block_files_.size() - 1 < static_cast<unsigned int>(index)) { |
+ if (block_headers_.size() - 1 < static_cast<unsigned int>(index)) { |
DCHECK(index > 0); |
- int to_add = index - static_cast<int>(block_files_.size()) + 1; |
- block_files_.resize(block_files_.size() + to_add); |
+ int to_add = index - static_cast<int>(block_headers_.size()) + 1; |
+ block_headers_.resize(block_headers_.size() + to_add); |
+ block_data_.resize(block_data_.size() + to_add); |
} |
+ DCHECK_EQ(block_headers_.size(), block_data_.size()); |
+ DCHECK(!block_headers_[index]); |
- base::FilePath name = Name(index); |
+ base::FilePath name = HeaderName(index); |
scoped_refptr<MappedFile> file(new MappedFile()); |
if (!file->Init(name, kBlockHeaderSize)) { |
@@ -485,55 +503,102 @@ |
return false; |
} |
- BlockHeader file_header(file.get()); |
- BlockFileHeader* header = file_header.Header(); |
- if (kBlockMagic != header->magic || kBlockVersion2 != header->version) { |
+ BlockHeader file_header(file); |
+ if (kBlockMagic != file_header.Header()->magic || |
+ (kBlockVersion2 != file_header.Header()->version && |
+ kBlockCurrentVersion != file_header.Header()->version)) { |
LOG(ERROR) << "Invalid file version or magic " << name.value(); |
return false; |
} |
- if (header->updating || !file_header.ValidateCounters()) { |
- // Last instance was not properly shutdown, or counters are out of sync. |
- if (!FixBlockFileHeader(file.get())) { |
- LOG(ERROR) << "Unable to fix block file " << name.value(); |
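+  // The version on disk has to match the format this instance is using. |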
+ if ((kBlockCurrentVersion == file_header.Header()->version && data_offset_) || |
+ (kBlockVersion2 == file_header.Header()->version && !data_offset_)) { |
+    LOG(ERROR) << "Unexpected file version " << name.value(); |
+ return false; |
+ } |
+ |
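+  // Old format: the header and the data live in this single mapped file. |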
+ if (kBlockVersion2 == file_header.Header()->version) { |
+ if (static_cast<int>(file_len) < |
+ file_header.Header()->max_entries * file_header.Header()->entry_size + |
+ kBlockHeaderSize) { |
+ LOG(ERROR) << "File too small " << name.value(); |
return false; |
} |
+ |
+ if (index == 0) { |
+ // Load the links file into memory with a single read. |
+ scoped_ptr<char[]> buf(new char[file_len]); |
+ if (!file->Read(buf.get(), file_len, 0)) |
+ return false; |
+ } |
+ |
+ ScopedFlush flush(file); |
+ file.swap(&block_headers_[index]); |
+ |
+ if (file_header.Header()->updating || !file_header.ValidateCounters()) { |
+ // Last instance was not properly shutdown, or counters are out of sync. |
+ if (!FixBlockFileHeader(index)) { |
+ LOG(ERROR) << "Unable to fix block file " << name.value(); |
+ file.swap(&block_headers_[index]); |
+ return false; |
+ } |
+ } |
+ return true; |
} |
- if (static_cast<int>(file_len) < |
- header->max_entries * header->entry_size + kBlockHeaderSize) { |
+ DCHECK(!block_data_[index]); |
+ |
+ // Open the data file. |
+ name = DataName(index); |
+ scoped_refptr<MappedFile> data_file(new MappedFile()); |
+ if (!data_file->InitNoMap(name)) { |
+ LOG(ERROR) << "Failed to open " << name.value(); |
+ return false; |
+ } |
+ |
+ if (static_cast<int>(data_file->GetLength()) < |
+ file_header.Header()->max_entries * file_header.Header()->entry_size) { |
LOG(ERROR) << "File too small " << name.value(); |
return false; |
} |
- if (index == 0) { |
- // Load the links file into memory with a single read. |
- scoped_ptr<char[]> buf(new char[file_len]); |
- if (!file->Read(buf.get(), file_len, 0)) |
+ file.swap(&block_headers_[index]); |
+ data_file.swap(&block_data_[index]); |
+ |
+ if (file_header.Header()->updating || !file_header.ValidateCounters()) { |
+ // Last instance was not properly shutdown, or counters are out of sync. |
+ if (!FixBlockFileHeader(index)) { |
+ LOG(ERROR) << "Unable to fix block file " << name.value(); |
+ file.swap(&block_headers_[index]); |
+ data_file.swap(&block_data_[index]); |
return false; |
+ } |
} |
- ScopedFlush flush(file.get()); |
- DCHECK(!block_files_[index]); |
- file.swap(&block_files_[index]); |
return true; |
} |
-bool BlockFiles::GrowBlockFile(MappedFile* file, BlockFileHeader* header) { |
+bool BlockFiles::GrowBlockFile(BlockFileHeader* header) { |
if (kMaxBlocks == header->max_entries) |
return false; |
+ int file_index = header->this_file; |
+  MappedFile* file = |
+      data_offset_ ? block_headers_[file_index] : block_data_[file_index]; |
ScopedFlush flush(file); |
- DCHECK(!header->empty[3]); |
- int new_size = header->max_entries + 1024; |
+ if (data_offset_) |
+ DCHECK(!header->empty[3]); |
+ |
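+  // Grow by a full chunk of blocks unless small steps were requested. |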
+ int step_size = small_steps_ ? 32 : kNumExtraBlocks; |
+ int new_size = header->max_entries + step_size; |
if (new_size > kMaxBlocks) |
new_size = kMaxBlocks; |
- int new_size_bytes = new_size * header->entry_size + sizeof(*header); |
+ int new_size_bytes = new_size * header->entry_size + data_offset_; |
if (!file->SetLength(new_size_bytes)) { |
// Most likely we are trying to truncate the file, so the header is wrong. |
- if (header->updating < 10 && !FixBlockFileHeader(file)) { |
+ if (header->updating < 10 && !FixBlockFileHeader(file_index)) { |
// If we can't fix the file increase the lock guard so we'll pick it on |
// the next start and replace it. |
header->updating = 100; |
@@ -543,7 +608,7 @@ |
} |
FileLock lock(header); |
- header->empty[3] = (new_size - header->max_entries) / 4; // 4 blocks entries |
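+  // The file may grow while it still has free 4-block entries, so add to |
+  // the counter instead of resetting it. |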
+ header->empty[3] += (new_size - header->max_entries) / 4; // 4 blocks entries |
header->max_entries = new_size; |
return true; |
@@ -551,7 +616,7 @@ |
MappedFile* BlockFiles::FileForNewBlock(FileType block_type, int block_count) { |
COMPILE_ASSERT(RANKINGS == 1, invalid_file_type); |
- MappedFile* file = block_files_[block_type - 1]; |
+ MappedFile* file = block_headers_[block_type - 1]; |
BlockHeader file_header(file); |
TimeTicks start = TimeTicks::Now(); |
@@ -564,7 +629,7 @@ |
continue; |
} |
- if (!GrowBlockFile(file, file_header.Header())) |
+ if (!GrowBlockFile(file_header.Header())) |
return NULL; |
break; |
} |
@@ -593,9 +658,35 @@ |
// Only the block_file argument is relevant for what we want. |
Addr address(BLOCK_256, 1, new_file, 0); |
- return GetFile(address); |
+ return GetFileHeader(address); |
} |
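+// Returns the index of the block file that stores |address|, opening that |
+// file if necessary, or -1 on failure. |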
+int BlockFiles::GetFileIndex(Addr address) { |
+ DCHECK(thread_checker_->CalledOnValidThread()); |
+ DCHECK(block_headers_.size() >= 4); |
+ DCHECK(address.is_block_file() || !address.is_initialized()); |
+ if (!address.is_initialized()) |
+ return -1; |
+ |
+ int file_index = address.FileNumber(); |
+ if (static_cast<unsigned int>(file_index) >= block_headers_.size() || |
+ !block_headers_[file_index]) { |
+ // We need to open the file |
+ if (!OpenBlockFile(file_index)) |
+ return -1; |
+ } |
+ DCHECK(block_headers_.size() >= static_cast<unsigned int>(file_index)); |
+ return file_index; |
+} |
+ |
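+// Returns the mapped file that stores the header (and allocation bitmap) for |
+// |address|, or NULL on failure. |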
+MappedFile* BlockFiles::GetFileHeader(Addr address) { |
+ int file_index = GetFileIndex(address); |
+ if (file_index < 0) |
+ return NULL; |
+ |
+ return block_headers_[file_index]; |
+} |
+ |
int BlockFiles::CreateNextBlockFile(FileType block_type) { |
for (int i = kFirstAdditionalBlockFile; i <= kMaxBlockFile; i++) { |
if (CreateBlockFile(i, block_type, false)) |
@@ -607,13 +698,13 @@ |
// We walk the list of files for this particular block type, deleting the ones |
// that are empty. |
bool BlockFiles::RemoveEmptyFile(FileType block_type) { |
- MappedFile* file = block_files_[block_type - 1]; |
+ MappedFile* file = block_headers_[block_type - 1]; |
BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer()); |
while (header->next_file) { |
// Only the block_file argument is relevant for what we want. |
Addr address(BLOCK_256, 1, header->next_file, 0); |
- MappedFile* next_file = GetFile(address); |
+ MappedFile* next_file = GetFileHeader(address); |
if (!next_file) |
return false; |
@@ -624,21 +715,32 @@ |
// Delete next_file and remove it from the chain. |
int file_index = header->next_file; |
header->next_file = next_header->next_file; |
- DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index)); |
+ DCHECK(block_headers_.size() >= static_cast<unsigned int>(file_index)); |
file->Flush(); |
// We get a new handle to the file and release the old one so that the |
// file gets unmapped... so we can delete it. |
- base::FilePath name = Name(file_index); |
+ base::FilePath name = HeaderName(file_index); |
scoped_refptr<File> this_file(new File(false)); |
this_file->Init(name); |
- block_files_[file_index]->Release(); |
- block_files_[file_index] = NULL; |
+ block_headers_[file_index]->Release(); |
+ block_headers_[file_index] = NULL; |
int failure = DeleteCacheFile(name) ? 0 : 1; |
- UMA_HISTOGRAM_COUNTS("DiskCache.DeleteFailed2", failure); |
if (failure) |
LOG(ERROR) << "Failed to delete " << name.value() << " from the cache."; |
+ |
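+    // With the new format there is also a separate data file to remove. |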
+ if (!data_offset_) { |
+ name = DataName(file_index); |
+ if (!DeleteCacheFile(name)) { |
+ failure = 1; |
+ LOG(ERROR) << "Failed to delete " << name.value() << |
+ " from the cache."; |
+ } |
+ block_data_[file_index]->Release(); |
+ block_data_[file_index] = NULL; |
+ } |
+ UMA_HISTOGRAM_COUNTS("DiskCache.DeleteFailed2", failure); |
continue; |
} |
@@ -648,46 +750,91 @@ |
return true; |
} |
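+// Keeps a reserve of free blocks on the chain of files for |block_type|: if |
+// less than two thirds of kNumExtraBlocks are free overall, grow the first |
+// file of the chain that is not already at kMaxBlocks. |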
+bool BlockFiles::PreallocateSpace(FileType block_type) { |
+ MappedFile* file = block_headers_[block_type - 1]; |
+ BlockHeader file_header(file); |
+ |
+ int empty_blocks = file_header.EmptyBlocks(); |
+ while (file_header.Header()->next_file) { |
+ // Only the block_file argument is relevant for what we want. |
+ Addr address(BLOCK_256, 1, file_header.Header()->next_file, 0); |
+ MappedFile* next_file = GetFileHeader(address); |
+ if (!next_file) |
+ return false; |
+ |
+ BlockHeader next_header(next_file); |
+ empty_blocks += next_header.EmptyBlocks(); |
+ |
+ file_header = next_header; |
+ file = next_file; |
+ } |
+ if (empty_blocks > kNumExtraBlocks * 2 / 3) |
+ return true; |
+ |
+ // Restart the search. |
+ file = block_headers_[block_type - 1]; |
+ file_header = BlockHeader(file); |
+ while (kMaxBlocks == file_header.Header()->max_entries) { |
+ file = NextFile(file); |
+ if (!file) |
+ return false; |
+ file_header = BlockHeader(file); |
+ } |
+ return GrowBlockFile(file_header.Header()); |
+} |
+ |
// Note that we expect to be called outside of a FileLock... however, we cannot |
// DCHECK on header->updating because we may be fixing a crash. |
-bool BlockFiles::FixBlockFileHeader(MappedFile* file) { |
- ScopedFlush flush(file); |
- BlockHeader file_header(file); |
- int file_size = static_cast<int>(file->GetLength()); |
- if (file_size < file_header.Size()) |
+bool BlockFiles::FixBlockFileHeader(int index) { |
+ DCHECK_GE(block_headers_.size(), static_cast<unsigned int>(index)); |
+ MappedFile* header_file = block_headers_[index]; |
+ ScopedFlush flush(header_file); |
+ BlockHeader file_header(header_file); |
+ |
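+  // With the old format the data is stored in the header file itself. |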
+ MappedFile* data_file = data_offset_ ? header_file : block_data_[index]; |
+ int file_size = static_cast<int>(data_file->GetLength()); |
+ if (file_size < data_offset_) |
return false; // file_size > 2GB is also an error. |
const int kMinBlockSize = 36; |
const int kMaxBlockSize = 4096; |
- BlockFileHeader* header = file_header.Header(); |
- if (header->entry_size < kMinBlockSize || |
- header->entry_size > kMaxBlockSize || header->num_entries < 0) |
+ if (file_header.Header()->entry_size < kMinBlockSize || |
+ file_header.Header()->entry_size > kMaxBlockSize || |
+ file_header.Header()->num_entries < 0) |
return false; |
// Make sure that we survive crashes. |
- header->updating = 1; |
- int expected = header->entry_size * header->max_entries + file_header.Size(); |
+ file_header.Header()->updating = 1; |
+ int expected = |
+ file_header.Header()->entry_size * file_header.Header()->max_entries + |
+ data_offset_; |
if (file_size != expected) { |
- int max_expected = header->entry_size * kMaxBlocks + file_header.Size(); |
- if (file_size < expected || header->empty[3] || file_size > max_expected) { |
+ int max_expected = file_header.Header()->entry_size * kMaxBlocks + |
+ data_offset_; |
+ if (file_size < expected || file_header.Header()->empty[3] || |
+ file_size > max_expected) { |
NOTREACHED(); |
LOG(ERROR) << "Unexpected file size"; |
return false; |
} |
// We were in the middle of growing the file. |
- int num_entries = (file_size - file_header.Size()) / header->entry_size; |
- header->max_entries = num_entries; |
+ int num_entries = (file_size - data_offset_) / |
+ file_header.Header()->entry_size; |
+ file_header.Header()->max_entries = num_entries; |
} |
file_header.FixAllocationCounters(); |
int empty_blocks = file_header.EmptyBlocks(); |
- if (empty_blocks + header->num_entries > header->max_entries) |
- header->num_entries = header->max_entries - empty_blocks; |
+ if (empty_blocks + file_header.Header()->num_entries > |
+ file_header.Header()->max_entries) { |
+ file_header.Header()->num_entries = file_header.Header()->max_entries - |
+ empty_blocks; |
+ } |
if (!file_header.ValidateCounters()) |
return false; |
- header->updating = 0; |
+ file_header.Header()->updating = 0; |
return true; |
} |
@@ -700,15 +847,15 @@ |
*used_count = 0; |
*load = 0; |
for (;;) { |
- if (!block_files_[index] && !OpenBlockFile(index)) |
+ if (!block_headers_[index] && !OpenBlockFile(index)) |
return; |
BlockFileHeader* header = |
- reinterpret_cast<BlockFileHeader*>(block_files_[index]->buffer()); |
+ reinterpret_cast<BlockFileHeader*>(block_headers_[index]->buffer()); |
max_blocks += header->max_entries; |
int used = header->max_entries; |
- for (int i = 0; i < kMaxNumBlocks; i++) { |
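+    // empty[] has one counter for each entry size, from 1 to 4 blocks. |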
+ for (int i = 0; i < 4; i++) { |
used -= header->empty[i] * (i + 1); |
DCHECK_GE(used, 0); |
} |
@@ -722,11 +869,18 @@ |
*load = *used_count * 100 / max_blocks; |
} |
-base::FilePath BlockFiles::Name(int index) { |
+base::FilePath BlockFiles::HeaderName(int index) { |
// The file format allows for 256 files. |
DCHECK(index < 256 && index >= 0); |
std::string tmp = base::StringPrintf("%s%d", kBlockName, index); |
return path_.AppendASCII(tmp); |
} |
+base::FilePath BlockFiles::DataName(int index) { |
+  // The file format allows for 256 files. |
+  DCHECK(index < 256 && index >= 0); |
+  std::string tmp = base::StringPrintf("%s%d%s", kBlockName, index, |
+                                       kBlockDataPostfix); |
+  return path_.AppendASCII(tmp); |
+} |
+ |
} // namespace disk_cache |