Chromium Code Reviews

Unified Diff: net/disk_cache/block_files.cc

Issue 15203004: Disk cache: Reference CL for the implementation of file format version 3. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: IndexTable review (created 7 years, 1 month ago)
Index: net/disk_cache/block_files.cc
===================================================================
--- net/disk_cache/block_files.cc (revision 199883)
+++ net/disk_cache/block_files.cc (working copy)
@@ -19,7 +19,8 @@
namespace {
-const char* kBlockName = "data_";
+const char kBlockName[] = "data_";
+const char kBlockDataPostfix[] = "_d";
// This array is used to perform a fast lookup of the nibble bit pattern to the
// type of entry that can be stored there (number of consecutive blocks).
@@ -32,15 +33,37 @@
return s_types[value];
}
-void FixAllocationCounters(disk_cache::BlockFileHeader* header);
+} // namespace
-// Creates a new entry on the allocation map, updating the apropriate counters.
-// target is the type of block to use (number of empty blocks), and size is the
-// actual number of blocks to use.
-bool CreateMapBlock(int target, int size, disk_cache::BlockFileHeader* header,
- int* index) {
- if (target <= 0 || target > disk_cache::kMaxNumBlocks ||
- size <= 0 || size > disk_cache::kMaxNumBlocks) {
+namespace disk_cache {
+
+BlockHeader::BlockHeader() : header_(NULL) {
+}
+
+BlockHeader::BlockHeader(BlockFileHeader* header) : header_(header) {
+}
+
+BlockHeader::BlockHeader(MappedFile* file)
+ : header_(reinterpret_cast<BlockFileHeader*>(file->buffer())) {
+}
+
+BlockHeader::BlockHeader(const BlockHeader& other) : header_(other.header_) {
+}
+
+BlockHeader::~BlockHeader() {
+}
+
+bool BlockHeader::CreateMapBlock(int size, int* index) {
+ DCHECK(size > 0 && size <= kMaxNumBlocks);
+ int target = 0;
+ for (int i = size; i <= kMaxNumBlocks; i++) {
+ if (header_->empty[i - 1]) {
+ target = i;
+ break;
+ }
+ }
+
+ if (!target) {
NOTREACHED();
return false;
}
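
A standalone sketch of the nibble lookup this code relies on: each nibble of allocation_map covers four consecutive blocks, and GetMapBlockType() (top of this chunk) returns how many consecutive free blocks remain at the nibble's high end. The s_types table itself is outside this diff, so the values below are reconstructed from the surrounding logic rather than quoted from the source:

#include <cassert>
#include <cstdint>

// Index is the nibble's bit pattern; value is how many blocks are still free
// at its high end. 0000 -> 4, 0001 -> 3, 0011 -> 2, 0111 -> 1, 1xxx -> 0.
const char s_types[16] = {4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0};

int GetMapBlockType(uint32_t value) {
  return s_types[value & 0xf];
}

int main() {
  assert(GetMapBlockType(0x0) == 4);  // empty nibble: room for a 4-block entry
  assert(GetMapBlockType(0x1) == 3);  // low block used: 3 blocks free on top
  assert(GetMapBlockType(0x7) == 1);  // only the top block is still free
  assert(GetMapBlockType(0x9) == 0);  // top block used: nothing allocatable here
  return 0;
}

CreateMapBlock() then scans header_->empty[] for the smallest type that can hold the requested size, which is why the loop above starts at i = size.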
@@ -48,35 +71,35 @@
TimeTicks start = TimeTicks::Now();
// We are going to process the map on 32-block chunks (32 bits), and on every
// chunk, iterate through the 8 nibbles where the new block can be located.
- int current = header->hints[target - 1];
- for (int i = 0; i < header->max_entries / 32; i++, current++) {
- if (current == header->max_entries / 32)
+ int current = header_->hints[target - 1];
+ for (int i = 0; i < header_->max_entries / 32; i++, current++) {
+ if (current == header_->max_entries / 32)
current = 0;
- uint32 map_block = header->allocation_map[current];
+ uint32 map_block = header_->allocation_map[current];
for (int j = 0; j < 8; j++, map_block >>= 4) {
if (GetMapBlockType(map_block) != target)
continue;
- disk_cache::FileLock lock(header);
+ disk_cache::FileLock lock(header_);
int index_offset = j * 4 + 4 - target;
*index = current * 32 + index_offset;
DCHECK_EQ(*index / 4, (*index + size - 1) / 4);
uint32 to_add = ((1 << size) - 1) << index_offset;
- header->num_entries++;
+ header_->num_entries++;
// Note that there is no race in the normal sense here, but if we enforce
// the order of memory accesses between num_entries and allocation_map, we
// can assert that even if we crash here, num_entries will never be less
// than the actual number of used blocks.
base::subtle::MemoryBarrier();
- header->allocation_map[current] |= to_add;
+ header_->allocation_map[current] |= to_add;
- header->hints[target - 1] = current;
- header->empty[target - 1]--;
- DCHECK_GE(header->empty[target - 1], 0);
+ header_->hints[target - 1] = current;
+ header_->empty[target - 1]--;
+ DCHECK_GE(header_->empty[target - 1], 0);
if (target != size) {
- header->empty[target - size - 1]++;
+ header_->empty[target - size - 1]++;
}
HISTOGRAM_TIMES("DiskCache.CreateBlock", TimeTicks::Now() - start);
return true;
@@ -86,33 +109,31 @@
// It is possible to have an undetected corruption (for example when the OS
// crashes), fix it here.
LOG(ERROR) << "Failing CreateMapBlock";
- FixAllocationCounters(header);
+ FixAllocationCounters();
return false;
}
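
The bit arithmetic in CreateMapBlock() is easier to check with concrete numbers. A minimal sketch, assuming a 2-block allocation that lands in nibble j = 5 of the current 32-bit map word (all values below are made up for illustration):

#include <cassert>
#include <cstdint>

int main() {
  const int size = 2;     // blocks requested
  const int target = 2;   // nibble type found: two free blocks at its high end
  const int j = 5;        // nibble position within the 32-bit map word

  // The free blocks sit at the top of the nibble, so the new entry starts at
  // bit (4 - target) of nibble j.
  int index_offset = j * 4 + 4 - target;                 // 22
  uint32_t to_add = ((1u << size) - 1) << index_offset;  // bits 22 and 23
  assert(index_offset == 22);
  assert(to_add == 0x00C00000u);

  // The block index never straddles a 4-block (nibble) boundary, which is
  // what the DCHECK_EQ in the real code asserts.
  const int current = 7;                                 // 8th map word
  int index = current * 32 + index_offset;               // block 246
  assert(index / 4 == (index + size - 1) / 4);
  return 0;
}

The MemoryBarrier() above enforces that num_entries is visibly incremented before the map bits are set, so a crash between the two can only leave num_entries too high, never lower than the number of blocks actually marked used.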
-// Deletes the block pointed by index from allocation_map, and updates the
-// relevant counters on the header.
-void DeleteMapBlock(int index, int size, disk_cache::BlockFileHeader* header) {
- if (size < 0 || size > disk_cache::kMaxNumBlocks) {
+void BlockHeader::DeleteMapBlock(int index, int size) {
+ if (size < 0 || size > kMaxNumBlocks) {
NOTREACHED();
return;
}
TimeTicks start = TimeTicks::Now();
int byte_index = index / 8;
- uint8* byte_map = reinterpret_cast<uint8*>(header->allocation_map);
+ uint8* byte_map = reinterpret_cast<uint8*>(header_->allocation_map);
uint8 map_block = byte_map[byte_index];
if (index % 8 >= 4)
map_block >>= 4;
- // See what type of block will be availabe after we delete this one.
+ // See what type of block will be available after we delete this one.
int bits_at_end = 4 - size - index % 4;
uint8 end_mask = (0xf << (4 - bits_at_end)) & 0xf;
bool update_counters = (map_block & end_mask) == 0;
uint8 new_value = map_block & ~(((1 << size) - 1) << (index % 4));
int new_type = GetMapBlockType(new_value);
- disk_cache::FileLock lock(header);
+ disk_cache::FileLock lock(header_);
DCHECK((((1 << size) - 1) << (index % 8)) < 0x100);
uint8 to_clear = ((1 << size) - 1) << (index % 8);
DCHECK((byte_map[byte_index] & to_clear) == to_clear);
@@ -120,26 +141,24 @@
if (update_counters) {
if (bits_at_end)
- header->empty[bits_at_end - 1]--;
- header->empty[new_type - 1]++;
- DCHECK_GE(header->empty[bits_at_end - 1], 0);
+ header_->empty[bits_at_end - 1]--;
+ header_->empty[new_type - 1]++;
+ DCHECK_GE(header_->empty[bits_at_end - 1], 0);
}
base::subtle::MemoryBarrier();
- header->num_entries--;
- DCHECK_GE(header->num_entries, 0);
+ header_->num_entries--;
+ DCHECK_GE(header_->num_entries, 0);
HISTOGRAM_TIMES("DiskCache.DeleteBlock", TimeTicks::Now() - start);
}
-#ifndef NDEBUG
-// Returns true if the specified block is used. Note that this is a simplified
-// version of DeleteMapBlock().
-bool UsedMapBlock(int index, int size, disk_cache::BlockFileHeader* header) {
- if (size < 0 || size > disk_cache::kMaxNumBlocks) {
+// Note that this is a simplified version of DeleteMapBlock().
+bool BlockHeader::UsedMapBlock(int index, int size) {
+ if (size < 0 || size > kMaxNumBlocks) {
NOTREACHED();
return false;
}
int byte_index = index / 8;
- uint8* byte_map = reinterpret_cast<uint8*>(header->allocation_map);
+ uint8* byte_map = reinterpret_cast<uint8*>(header_->allocation_map);
uint8 map_block = byte_map[byte_index];
if (index % 8 >= 4)
@@ -149,39 +168,34 @@
uint8 to_clear = ((1 << size) - 1) << (index % 8);
return ((byte_map[byte_index] & to_clear) == to_clear);
}
-#endif // NDEBUG
-// Restores the "empty counters" and allocation hints.
-void FixAllocationCounters(disk_cache::BlockFileHeader* header) {
- for (int i = 0; i < disk_cache::kMaxNumBlocks; i++) {
- header->hints[i] = 0;
- header->empty[i] = 0;
+void BlockHeader::FixAllocationCounters() {
+ for (int i = 0; i < kMaxNumBlocks; i++) {
+ header_->hints[i] = 0;
+ header_->empty[i] = 0;
}
- for (int i = 0; i < header->max_entries / 32; i++) {
- uint32 map_block = header->allocation_map[i];
+ for (int i = 0; i < header_->max_entries / 32; i++) {
+ uint32 map_block = header_->allocation_map[i];
for (int j = 0; j < 8; j++, map_block >>= 4) {
int type = GetMapBlockType(map_block);
if (type)
- header->empty[type -1]++;
+ header_->empty[type -1]++;
}
}
}
-// Returns true if the current block file should not be used as-is to store more
-// records. |block_count| is the number of blocks to allocate.
-bool NeedToGrowBlockFile(const disk_cache::BlockFileHeader* header,
- int block_count) {
+bool BlockHeader::NeedToGrowBlockFile(int block_count) const {
bool have_space = false;
int empty_blocks = 0;
- for (int i = 0; i < disk_cache::kMaxNumBlocks; i++) {
- empty_blocks += header->empty[i] * (i + 1);
- if (i >= block_count - 1 && header->empty[i])
+ for (int i = 0; i < kMaxNumBlocks; i++) {
+ empty_blocks += header_->empty[i] * (i + 1);
+ if (i >= block_count - 1 && header_->empty[i])
have_space = true;
}
- if (header->next_file && (empty_blocks < disk_cache::kMaxBlocks / 10)) {
+ if (header_->next_file && (empty_blocks < kMaxBlocks / 10)) {
// This file is almost full but we already created another one, don't use
// this file yet so that it is easier to find empty blocks when we start
// using this file again.
@@ -190,36 +204,66 @@
return !have_space;
}
-// Returns the number of empty blocks for this file.
-int EmptyBlocks(const disk_cache::BlockFileHeader* header) {
+bool BlockHeader::CanAllocate(int block_count) const {
+ DCHECK_GT(block_count, 0);
+ for (int i = block_count - 1; i < kMaxNumBlocks; i++) {
+ if (header_->empty[i])
+ return true;
+ }
+
+ return false;
+}
+
+int BlockHeader::EmptyBlocks() const {
int empty_blocks = 0;
- for (int i = 0; i < disk_cache::kMaxNumBlocks; i++) {
- empty_blocks += header->empty[i] * (i + 1);
- if (header->empty[i] < 0)
- return false;
+ for (int i = 0; i < kMaxNumBlocks; i++) {
+ empty_blocks += header_->empty[i] * (i + 1);
+ if (header_->empty[i] < 0)
+ return 0;
}
return empty_blocks;
}
-// Returns true if the counters look OK.
-bool ValidateCounters(const disk_cache::BlockFileHeader* header) {
- if (header->max_entries < 0 || header->max_entries > disk_cache::kMaxBlocks ||
- header->num_entries < 0)
+int BlockHeader::MinimumAllocations() const {
+ return header_->empty[kMaxNumBlocks - 1];
+}
+
+int BlockHeader::Capacity() const {
+ return header_->max_entries;
+}
+
+bool BlockHeader::ValidateCounters() const {
+ if (header_->max_entries < 0 || header_->max_entries > kMaxBlocks ||
+ header_->num_entries < 0)
return false;
- int empty_blocks = EmptyBlocks(header);
- if (empty_blocks + header->num_entries > header->max_entries)
+ int empty_blocks = EmptyBlocks();
+ if (empty_blocks + header_->num_entries > header_->max_entries)
return false;
return true;
}
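
The empty[] counters read by CanAllocate(), EmptyBlocks() and ValidateCounters() are per-type tallies: empty[i] is the number of nibbles currently offering i + 1 consecutive free blocks. A small self-contained sketch with made-up counter values (kMaxNumBlocks is 4 for block files):

#include <cassert>

const int kMaxNumBlocks = 4;

// Weighted sum of the counters = total free blocks in the file.
int EmptyBlocks(const int empty[kMaxNumBlocks]) {
  int total = 0;
  for (int i = 0; i < kMaxNumBlocks; i++)
    total += empty[i] * (i + 1);
  return total;
}

// An allocation of block_count blocks fits only if some nibble offers at
// least that many consecutive free blocks.
bool CanAllocate(const int empty[kMaxNumBlocks], int block_count) {
  for (int i = block_count - 1; i < kMaxNumBlocks; i++)
    if (empty[i])
      return true;
  return false;
}

int main() {
  int empty[kMaxNumBlocks] = {3, 2, 0, 5};
  assert(EmptyBlocks(empty) == 3 * 1 + 2 * 2 + 0 * 3 + 5 * 4);  // 27 blocks
  assert(CanAllocate(empty, 3));      // the fully free nibbles can take it

  int scraps[kMaxNumBlocks] = {1, 0, 0, 0};
  assert(!CanAllocate(scraps, 2));    // only single free blocks are left
  return 0;
}

ValidateCounters() is then just the sanity check that this total plus num_entries cannot exceed max_entries.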
-} // namespace
+int BlockHeader::FileId() const {
+ return header_->this_file;
+}
-namespace disk_cache {
+int BlockHeader::NextFileId() const {
+ return header_->next_file;
+}
+BlockFileHeader* BlockHeader::Header() {
+ return header_;
+}
+
+// ------------------------------------------------------------------------
+
BlockFiles::BlockFiles(const base::FilePath& path)
- : init_(false), zero_buffer_(NULL), path_(path) {
+ : init_(false),
+ small_steps_(false),
+ data_offset_(0),
+ zero_buffer_(NULL),
+ path_(path) {
}
BlockFiles::~BlockFiles() {
@@ -228,15 +272,17 @@
CloseFiles();
}
-bool BlockFiles::Init(bool create_files) {
+bool BlockFiles::Init(bool create_files, int num_files) {
DCHECK(!init_);
if (init_)
return false;
+ data_offset_ = num_files > kFirstAdditionalBlockFile ? 0 : kBlockHeaderSize;
thread_checker_.reset(new base::ThreadChecker);
- block_files_.resize(kFirstAdditionalBlockFile);
- for (int i = 0; i < kFirstAdditionalBlockFile; i++) {
+ block_headers_.resize(num_files);
+ block_data_.resize(num_files);
+ for (int i = 0; i < num_files; i++) {
if (create_files)
if (!CreateBlockFile(i, static_cast<FileType>(i + 1), true))
return false;
@@ -247,36 +293,49 @@
// Walk this chain of files removing empty ones.
if (!RemoveEmptyFile(static_cast<FileType>(i + 1)))
return false;
+
+ if (!data_offset_ && !PreallocateSpace(static_cast<FileType>(i + 1)))
+ return false;
}
init_ = true;
return true;
}
+void BlockFiles::GetBitmaps(int num_files, BlockFilesBitmaps* bitmaps) {
+ bitmaps->clear();
+ bitmaps->resize(num_files);
+
+ for (int i = 0; i < num_files; i++) {
+ // Only the block_file argument is relevant for what we want.
+ Addr address(BLOCK_256, 1, i, 0);
+ BlockHeader header(GetFileHeader(address));
+ (*bitmaps)[i] = header;
+ }
+}
+
MappedFile* BlockFiles::GetFile(Addr address) {
- DCHECK(thread_checker_->CalledOnValidThread());
- DCHECK(block_files_.size() >= 4);
- DCHECK(address.is_block_file() || !address.is_initialized());
- if (!address.is_initialized())
+ int file_index = GetFileIndex(address);
+ if (file_index < 0)
return NULL;
- int file_index = address.FileNumber();
- if (static_cast<unsigned int>(file_index) >= block_files_.size() ||
- !block_files_[file_index]) {
- // We need to open the file
- if (!OpenBlockFile(file_index))
- return NULL;
- }
- DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
- return block_files_[file_index];
+ if (data_offset_)
+ return block_headers_[file_index];
+
+ DCHECK(block_data_.size() >= static_cast<unsigned int>(file_index));
+ return block_data_[file_index];
}
bool BlockFiles::CreateBlock(FileType block_type, int block_count,
Addr* block_address) {
DCHECK(thread_checker_->CalledOnValidThread());
- if (block_type < RANKINGS || block_type > BLOCK_4K ||
- block_count < 1 || block_count > 4)
+ DCHECK_NE(block_type, EXTERNAL);
+ DCHECK_NE(block_type, BLOCK_FILES);
+ DCHECK_NE(block_type, BLOCK_ENTRIES);
+ DCHECK_NE(block_type, BLOCK_EVICTED);
+ if (block_count < 1 || block_count > kMaxNumBlocks)
return false;
+
if (!init_)
return false;
@@ -285,22 +344,13 @@
return false;
ScopedFlush flush(file);
- BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ BlockHeader file_header(file);
- int target_size = 0;
- for (int i = block_count; i <= 4; i++) {
- if (header->empty[i - 1]) {
- target_size = i;
- break;
- }
- }
-
- DCHECK(target_size);
int index;
- if (!CreateMapBlock(target_size, block_count, header, &index))
+ if (!file_header.CreateMapBlock(block_count, &index))
return false;
- Addr address(block_type, block_count, header->this_file, index);
+ Addr address(block_type, block_count, file_header.FileId(), index);
block_address->set_value(address.value());
Trace("CreateBlock 0x%x", address.value());
return true;
@@ -322,20 +372,21 @@
Trace("DeleteBlock 0x%x", address.value());
size_t size = address.BlockSize() * address.num_blocks();
- size_t offset = address.start_block() * address.BlockSize() +
- kBlockHeaderSize;
+ size_t offset = address.start_block() * address.BlockSize() + data_offset_;
if (deep)
file->Write(zero_buffer_, size, offset);
- BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
- DeleteMapBlock(address.start_block(), address.num_blocks(), header);
+ BlockHeader file_header(GetFileHeader(address));
+ file_header.DeleteMapBlock(address.start_block(), address.num_blocks());
file->Flush();
- if (!header->num_entries) {
+ if (!file_header.Header()->num_entries) {
// This file is now empty. Let's try to delete it.
- FileType type = Addr::RequiredFileType(header->entry_size);
- if (Addr::BlockSizeForFileType(RANKINGS) == header->entry_size)
+ FileType type = Addr::RequiredFileType(file_header.Header()->entry_size);
+ if (Addr::BlockSizeForFileType(RANKINGS) ==
+ file_header.Header()->entry_size) {
type = RANKINGS;
+ }
RemoveEmptyFile(type); // Ignore failures.
}
}
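
With the new layout the byte offset of a block no longer includes the header: data_offset_ is kBlockHeaderSize when blocks follow the header in the same file (the old version-2 layout) and 0 when they live in the separate "_d" data file. A worked sketch of the offset computation used in DeleteBlock() above, assuming 256-byte blocks and the usual 8 KB header (the kBlockHeaderSize value is an assumption here):

#include <cassert>
#include <cstddef>

const size_t kBlockHeaderSize = 8192;  // assumed on-disk size of BlockFileHeader

size_t BlockOffset(size_t block_size, int start_block, size_t data_offset) {
  return start_block * block_size + data_offset;
}

int main() {
  // Old layout: block 10 of a BLOCK_256 file sits after the 8 KB header.
  assert(BlockOffset(256, 10, kBlockHeaderSize) == 10752);
  // New layout: the data file starts directly with block 0.
  assert(BlockOffset(256, 10, 0) == 2560);
  return 0;
}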
@@ -345,13 +396,20 @@
DCHECK(thread_checker_->CalledOnValidThread());
}
init_ = false;
- for (unsigned int i = 0; i < block_files_.size(); i++) {
- if (block_files_[i]) {
- block_files_[i]->Release();
- block_files_[i] = NULL;
+ for (unsigned int i = 0; i < block_headers_.size(); i++) {
+ if (block_headers_[i]) {
+ block_headers_[i]->Release();
+ block_headers_[i] = NULL;
}
}
- block_files_.clear();
+ for (unsigned int i = 0; i < block_data_.size(); i++) {
+ if (block_data_[i]) {
+ block_data_[i]->Release();
+ block_data_[i] = NULL;
+ }
+ }
+ block_headers_.clear();
+ block_data_.clear();
}
void BlockFiles::ReportStats() {
@@ -379,31 +437,19 @@
if (!address.is_initialized() || address.is_separate_file())
return false;
- MappedFile* file = GetFile(address);
+ MappedFile* file = GetFileHeader(address);
if (!file)
return false;
- BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
- bool rv = UsedMapBlock(address.start_block(), address.num_blocks(), header);
+ BlockHeader header(file);
+ bool rv = header.UsedMapBlock(address.start_block(), address.num_blocks());
DCHECK(rv);
-
- static bool read_contents = false;
- if (read_contents) {
- scoped_ptr<char[]> buffer;
- buffer.reset(new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4]);
- size_t size = address.BlockSize() * address.num_blocks();
- size_t offset = address.start_block() * address.BlockSize() +
- kBlockHeaderSize;
- bool ok = file->Read(buffer.get(), size, offset);
- DCHECK(ok);
- }
-
return rv;
#endif
}
bool BlockFiles::CreateBlockFile(int index, FileType file_type, bool force) {
- base::FilePath name = Name(index);
+ base::FilePath name = HeaderName(index);
int flags =
force ? base::PLATFORM_FILE_CREATE_ALWAYS : base::PLATFORM_FILE_CREATE;
flags |= base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_EXCLUSIVE_WRITE;
@@ -414,21 +460,36 @@
return false;
BlockFileHeader header;
+ memset(&header, 0, sizeof(header));
+ header.magic = kBlockMagic;
+ header.version = data_offset_ ? kBlockVersion2 : kBlockCurrentVersion;
header.entry_size = Addr::BlockSizeForFileType(file_type);
header.this_file = static_cast<int16>(index);
DCHECK(index <= kint16max && index >= 0);
- return file->Write(&header, sizeof(header), 0);
+ if (!file->Write(&header, sizeof(header), 0))
+ return false;
+
+ if (header.version == kBlockVersion2)
+ return true;
+
+ // Now create another file for the data itself.
+ name = DataName(index);
+ file = new File(base::CreatePlatformFile(name, flags, NULL, NULL));
+ return file->IsValid();
}
bool BlockFiles::OpenBlockFile(int index) {
- if (block_files_.size() - 1 < static_cast<unsigned int>(index)) {
+ if (block_headers_.size() - 1 < static_cast<unsigned int>(index)) {
DCHECK(index > 0);
- int to_add = index - static_cast<int>(block_files_.size()) + 1;
- block_files_.resize(block_files_.size() + to_add);
+ int to_add = index - static_cast<int>(block_headers_.size()) + 1;
+ block_headers_.resize(block_headers_.size() + to_add);
+ block_data_.resize(block_data_.size() + to_add);
}
+ DCHECK_EQ(block_headers_.size(), block_data_.size());
+ DCHECK(!block_headers_[index]);
- base::FilePath name = Name(index);
+ base::FilePath name = HeaderName(index);
scoped_refptr<MappedFile> file(new MappedFile());
if (!file->Init(name, kBlockHeaderSize)) {
@@ -442,54 +503,102 @@
return false;
}
- BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
- if (kBlockMagic != header->magic || kCurrentVersion != header->version) {
+ BlockHeader file_header(file);
+ if (kBlockMagic != file_header.Header()->magic ||
+ (kBlockVersion2 != file_header.Header()->version &&
+ kBlockCurrentVersion != file_header.Header()->version)) {
LOG(ERROR) << "Invalid file version or magic " << name.value();
return false;
}
- if (header->updating || !ValidateCounters(header)) {
- // Last instance was not properly shutdown, or counters are out of sync.
- if (!FixBlockFileHeader(file)) {
- LOG(ERROR) << "Unable to fix block file " << name.value();
+ if ((kBlockCurrentVersion == file_header.Header()->version && data_offset_) ||
+ (kBlockVersion2 == file_header.Header()->version && !data_offset_)) {
+ LOG(ERROR) << "Unexpected file version" << name.value();
+ return false;
+ }
+
+ if (kBlockVersion2 == file_header.Header()->version) {
+ if (static_cast<int>(file_len) <
+ file_header.Header()->max_entries * file_header.Header()->entry_size +
+ kBlockHeaderSize) {
+ LOG(ERROR) << "File too small " << name.value();
return false;
}
+
+ if (index == 0) {
+ // Load the links file into memory with a single read.
+ scoped_ptr<char[]> buf(new char[file_len]);
+ if (!file->Read(buf.get(), file_len, 0))
+ return false;
+ }
+
+ ScopedFlush flush(file);
+ file.swap(&block_headers_[index]);
+
+ if (file_header.Header()->updating || !file_header.ValidateCounters()) {
+ // Last instance was not properly shutdown, or counters are out of sync.
+ if (!FixBlockFileHeader(index)) {
+ LOG(ERROR) << "Unable to fix block file " << name.value();
+ file.swap(&block_headers_[index]);
+ return false;
+ }
+ }
+ return true;
}
- if (static_cast<int>(file_len) <
- header->max_entries * header->entry_size + kBlockHeaderSize) {
+ DCHECK(!block_data_[index]);
+
+ // Open the data file.
+ name = DataName(index);
+ scoped_refptr<MappedFile> data_file(new MappedFile());
+ if (!data_file->InitNoMap(name)) {
+ LOG(ERROR) << "Failed to open " << name.value();
+ return false;
+ }
+
+ if (static_cast<int>(data_file->GetLength()) <
+ file_header.Header()->max_entries * file_header.Header()->entry_size) {
LOG(ERROR) << "File too small " << name.value();
return false;
}
- if (index == 0) {
- // Load the links file into memory with a single read.
- scoped_ptr<char[]> buf(new char[file_len]);
- if (!file->Read(buf.get(), file_len, 0))
+ file.swap(&block_headers_[index]);
+ data_file.swap(&block_data_[index]);
+
+ if (file_header.Header()->updating || !file_header.ValidateCounters()) {
+ // Last instance was not properly shutdown, or counters are out of sync.
+ if (!FixBlockFileHeader(index)) {
+ LOG(ERROR) << "Unable to fix block file " << name.value();
+ file.swap(&block_headers_[index]);
+ data_file.swap(&block_data_[index]);
return false;
+ }
}
- ScopedFlush flush(file);
- DCHECK(!block_files_[index]);
- file.swap(&block_files_[index]);
return true;
}
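
A condensed sketch of the header acceptance rule OpenBlockFile() now applies: a file may carry either on-disk version, but it has to match the layout the cache was initialized for (data_offset_ != 0 means the old one-file layout). The constant values below are assumptions for illustration; only their roles matter:

#include <cassert>
#include <cstdint>

const uint32_t kBlockMagic = 0xC104CAC3;  // assumed value
const uint32_t kBlockVersion2 = 0x20000;  // assumed: header and data share a file
const uint32_t kBlockVersion3 = 0x30000;  // assumed: kBlockCurrentVersion

bool AcceptHeader(uint32_t magic, uint32_t version, bool old_layout) {
  if (magic != kBlockMagic)
    return false;
  if (version != kBlockVersion2 && version != kBlockVersion3)
    return false;
  // Reject a version that does not match the layout this cache instance uses.
  return old_layout == (version == kBlockVersion2);
}

int main() {
  assert(AcceptHeader(kBlockMagic, kBlockVersion2, true));    // v2 file, v2 cache
  assert(!AcceptHeader(kBlockMagic, kBlockVersion3, true));   // v3 file, v2 cache
  assert(AcceptHeader(kBlockMagic, kBlockVersion3, false));   // v3 file, v3 cache
  assert(!AcceptHeader(0xDEADBEEF, kBlockVersion2, true));    // bad magic
  return 0;
}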
-bool BlockFiles::GrowBlockFile(MappedFile* file, BlockFileHeader* header) {
+bool BlockFiles::GrowBlockFile(BlockFileHeader* header) {
if (kMaxBlocks == header->max_entries)
return false;
+ int file_index = header->this_file;
+ MappedFile* file = data_offset_ ? block_headers_[file_index] :
+ block_data_[file_index];
ScopedFlush flush(file);
- DCHECK(!header->empty[3]);
- int new_size = header->max_entries + 1024;
+ if (data_offset_)
+ DCHECK(!header->empty[3]);
+
+ int step_size = small_steps_ ? 32 : kNumExtraBlocks;
+ int new_size = header->max_entries + step_size;
if (new_size > kMaxBlocks)
new_size = kMaxBlocks;
- int new_size_bytes = new_size * header->entry_size + sizeof(*header);
+ int new_size_bytes = new_size * header->entry_size + data_offset_;
if (!file->SetLength(new_size_bytes)) {
// Most likely we are trying to truncate the file, so the header is wrong.
- if (header->updating < 10 && !FixBlockFileHeader(file)) {
+ if (header->updating < 10 && !FixBlockFileHeader(file_index)) {
// If we can't fix the file increase the lock guard so we'll pick it on
// the next start and replace it.
header->updating = 100;
@@ -499,7 +608,7 @@
}
FileLock lock(header);
- header->empty[3] = (new_size - header->max_entries) / 4; // 4 blocks entries
+ header->empty[3] += (new_size - header->max_entries) / 4; // 4 blocks entries
header->max_entries = new_size;
return true;
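
GrowBlockFile() now extends the file by kNumExtraBlocks entries at a time (or by 32 when small_steps_ is set) and adds to empty[3] instead of overwriting it, since with preallocation the file may grow while 4-block slots are still free. A worked sketch of that counter update (the kNumExtraBlocks value below is an assumption; the constant is defined outside this diff):

#include <cassert>

int main() {
  const int kNumExtraBlocks = 1024;  // assumed; matches the old fixed grow step
  int max_entries = 2048;
  int empty4 = 3;                    // header->empty[3]: free 4-block slots

  int new_size = max_entries + kNumExtraBlocks;
  // Every four newly appended blocks form one fully empty nibble, so the
  // 4-block counter grows by step / 4; '+=' keeps slots that were already free.
  empty4 += (new_size - max_entries) / 4;
  max_entries = new_size;

  assert(max_entries == 3072);
  assert(empty4 == 3 + 256);
  return 0;
}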
@@ -507,20 +616,20 @@
MappedFile* BlockFiles::FileForNewBlock(FileType block_type, int block_count) {
COMPILE_ASSERT(RANKINGS == 1, invalid_file_type);
- MappedFile* file = block_files_[block_type - 1];
- BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ MappedFile* file = block_headers_[block_type - 1];
+ BlockHeader file_header(file);
TimeTicks start = TimeTicks::Now();
- while (NeedToGrowBlockFile(header, block_count)) {
- if (kMaxBlocks == header->max_entries) {
+ while (file_header.NeedToGrowBlockFile(block_count)) {
+ if (kMaxBlocks == file_header.Header()->max_entries) {
file = NextFile(file);
if (!file)
return NULL;
- header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ file_header = BlockHeader(file);
continue;
}
- if (!GrowBlockFile(file, header))
+ if (!GrowBlockFile(file_header.Header()))
return NULL;
break;
}
@@ -549,9 +658,35 @@
// Only the block_file argument is relevant for what we want.
Addr address(BLOCK_256, 1, new_file, 0);
- return GetFile(address);
+ return GetFileHeader(address);
}
+int BlockFiles::GetFileIndex(Addr address) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ DCHECK(block_headers_.size() >= 4);
+ DCHECK(address.is_block_file() || !address.is_initialized());
+ if (!address.is_initialized())
+ return -1;
+
+ int file_index = address.FileNumber();
+ if (static_cast<unsigned int>(file_index) >= block_headers_.size() ||
+ !block_headers_[file_index]) {
+ // We need to open the file
+ if (!OpenBlockFile(file_index))
+ return -1;
+ }
+ DCHECK(block_headers_.size() >= static_cast<unsigned int>(file_index));
+ return file_index;
+}
+
+MappedFile* BlockFiles::GetFileHeader(Addr address) {
+ int file_index = GetFileIndex(address);
+ if (file_index < 0)
+ return NULL;
+
+ return block_headers_[file_index];
+}
+
int BlockFiles::CreateNextBlockFile(FileType block_type) {
for (int i = kFirstAdditionalBlockFile; i <= kMaxBlockFile; i++) {
if (CreateBlockFile(i, block_type, false))
@@ -563,13 +698,13 @@
// We walk the list of files for this particular block type, deleting the ones
// that are empty.
bool BlockFiles::RemoveEmptyFile(FileType block_type) {
- MappedFile* file = block_files_[block_type - 1];
+ MappedFile* file = block_headers_[block_type - 1];
BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
while (header->next_file) {
// Only the block_file argument is relevant for what we want.
Addr address(BLOCK_256, 1, header->next_file, 0);
- MappedFile* next_file = GetFile(address);
+ MappedFile* next_file = GetFileHeader(address);
if (!next_file)
return false;
@@ -580,21 +715,32 @@
// Delete next_file and remove it from the chain.
int file_index = header->next_file;
header->next_file = next_header->next_file;
- DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
+ DCHECK(block_headers_.size() >= static_cast<unsigned int>(file_index));
file->Flush();
// We get a new handle to the file and release the old one so that the
// file gets unmmaped... so we can delete it.
- base::FilePath name = Name(file_index);
+ base::FilePath name = HeaderName(file_index);
scoped_refptr<File> this_file(new File(false));
this_file->Init(name);
- block_files_[file_index]->Release();
- block_files_[file_index] = NULL;
+ block_headers_[file_index]->Release();
+ block_headers_[file_index] = NULL;
int failure = DeleteCacheFile(name) ? 0 : 1;
- UMA_HISTOGRAM_COUNTS("DiskCache.DeleteFailed2", failure);
if (failure)
LOG(ERROR) << "Failed to delete " << name.value() << " from the cache.";
+
+ if (!data_offset_) {
+ name = DataName(file_index);
+ if (!DeleteCacheFile(name)) {
+ failure = 1;
+ LOG(ERROR) << "Failed to delete " << name.value() <<
+ " from the cache.";
+ }
+ block_data_[file_index]->Release();
+ block_data_[file_index] = NULL;
+ }
+ UMA_HISTOGRAM_COUNTS("DiskCache.DeleteFailed2", failure);
continue;
}
@@ -604,45 +750,91 @@
return true;
}
+bool BlockFiles::PreallocateSpace(FileType block_type) {
+ MappedFile* file = block_headers_[block_type - 1];
+ BlockHeader file_header(file);
+
+ int empty_blocks = file_header.EmptyBlocks();
+ while (file_header.Header()->next_file) {
+ // Only the block_file argument is relevant for what we want.
+ Addr address(BLOCK_256, 1, file_header.Header()->next_file, 0);
+ MappedFile* next_file = GetFileHeader(address);
+ if (!next_file)
+ return false;
+
+ BlockHeader next_header(next_file);
+ empty_blocks += next_header.EmptyBlocks();
+
+ file_header = next_header;
+ file = next_file;
+ }
+ if (empty_blocks > kNumExtraBlocks * 2 / 3)
+ return true;
+
+ // Restart the search.
+ file = block_headers_[block_type - 1];
+ file_header = BlockHeader(file);
+ while (kMaxBlocks == file_header.Header()->max_entries) {
+ file = NextFile(file);
+ if (!file)
+ return false;
+ file_header = BlockHeader(file);
+ }
+ return GrowBlockFile(file_header.Header());
+}
+
// Note that we expect to be called outside of a FileLock... however, we cannot
// DCHECK on header->updating because we may be fixing a crash.
-bool BlockFiles::FixBlockFileHeader(MappedFile* file) {
- ScopedFlush flush(file);
- BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
- int file_size = static_cast<int>(file->GetLength());
- if (file_size < static_cast<int>(sizeof(*header)))
+bool BlockFiles::FixBlockFileHeader(int index) {
+ DCHECK_GE(block_headers_.size(), static_cast<unsigned int>(index));
+ MappedFile* header_file = block_headers_[index];
+ ScopedFlush flush(header_file);
+ BlockHeader file_header(header_file);
+
+ MappedFile* data_file = data_offset_ ? header_file : block_data_[index];
+ int file_size = static_cast<int>(data_file->GetLength());
+ if (file_size < data_offset_)
return false; // file_size > 2GB is also an error.
const int kMinBlockSize = 36;
const int kMaxBlockSize = 4096;
- if (header->entry_size < kMinBlockSize ||
- header->entry_size > kMaxBlockSize || header->num_entries < 0)
+ if (file_header.Header()->entry_size < kMinBlockSize ||
+ file_header.Header()->entry_size > kMaxBlockSize ||
+ file_header.Header()->num_entries < 0)
return false;
// Make sure that we survive crashes.
- header->updating = 1;
- int expected = header->entry_size * header->max_entries + sizeof(*header);
+ file_header.Header()->updating = 1;
+ int expected =
+ file_header.Header()->entry_size * file_header.Header()->max_entries +
+ data_offset_;
if (file_size != expected) {
- int max_expected = header->entry_size * kMaxBlocks + sizeof(*header);
- if (file_size < expected || header->empty[3] || file_size > max_expected) {
+ int max_expected = file_header.Header()->entry_size * kMaxBlocks +
+ data_offset_;
+ if (file_size < expected || file_header.Header()->empty[3] ||
+ file_size > max_expected) {
NOTREACHED();
LOG(ERROR) << "Unexpected file size";
return false;
}
// We were in the middle of growing the file.
- int num_entries = (file_size - sizeof(*header)) / header->entry_size;
- header->max_entries = num_entries;
+ int num_entries = (file_size - data_offset_) /
+ file_header.Header()->entry_size;
+ file_header.Header()->max_entries = num_entries;
}
- FixAllocationCounters(header);
- int empty_blocks = EmptyBlocks(header);
- if (empty_blocks + header->num_entries > header->max_entries)
- header->num_entries = header->max_entries - empty_blocks;
+ file_header.FixAllocationCounters();
+ int empty_blocks = file_header.EmptyBlocks();
+ if (empty_blocks + file_header.Header()->num_entries >
+ file_header.Header()->max_entries) {
+ file_header.Header()->num_entries = file_header.Header()->max_entries -
+ empty_blocks;
+ }
- if (!ValidateCounters(header))
+ if (!file_header.ValidateCounters())
return false;
- header->updating = 0;
+ file_header.Header()->updating = 0;
return true;
}
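
FixBlockFileHeader() distinguishes real corruption from an interrupted grow by comparing the data length against what the header claims, now measured relative to data_offset_. A worked sketch with made-up numbers for the version-3 case (blocks in a bare data file, so data_offset_ is 0):

#include <cassert>

int main() {
  const int data_offset = 0;   // version 3: the data file has no header
  const int entry_size = 256;
  int max_entries = 2048;

  // A crash during GrowBlockFile() left the file longer than the header says.
  int file_size = 2304 * entry_size + data_offset;
  int expected = max_entries * entry_size + data_offset;
  assert(file_size != expected && file_size > expected);

  // Recovery: trust the file length, recompute max_entries from it, then
  // rebuild the empty[]/hints counters from the allocation map.
  max_entries = (file_size - data_offset) / entry_size;
  assert(max_entries == 2304);
  return 0;
}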
@@ -655,11 +847,11 @@
*used_count = 0;
*load = 0;
for (;;) {
- if (!block_files_[index] && !OpenBlockFile(index))
+ if (!block_headers_[index] && !OpenBlockFile(index))
return;
BlockFileHeader* header =
- reinterpret_cast<BlockFileHeader*>(block_files_[index]->buffer());
+ reinterpret_cast<BlockFileHeader*>(block_headers_[index]->buffer());
max_blocks += header->max_entries;
int used = header->max_entries;
@@ -677,11 +869,18 @@
*load = *used_count * 100 / max_blocks;
}
-base::FilePath BlockFiles::Name(int index) {
+base::FilePath BlockFiles::HeaderName(int index) {
// The file format allows for 256 files.
- DCHECK(index < 256 || index >= 0);
+ DCHECK(index < 256 && index >= 0);
std::string tmp = base::StringPrintf("%s%d", kBlockName, index);
return path_.AppendASCII(tmp);
}
+base::FilePath BlockFiles::DataName(int index) {
+ // The file format allows for 256 files.
+ DCHECK(index < 256 || index >= 0);
+ std::string tmp = base::StringPrintf("%s%d_d", kBlockName, index);
+ return path_.AppendASCII(tmp);
+}
+
} // namespace disk_cache
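
For reference, the resulting on-disk names: HeaderName() keeps the historical "data_N" name for the file that holds the header and allocation map, and DataName() appends "_d" for the separate block data file used by the new format. A small sketch of the naming scheme (the helper name below is hypothetical):

#include <cassert>
#include <cstdio>
#include <string>

std::string BlockFileName(int index, bool data_file) {
  char buf[32];
  std::snprintf(buf, sizeof(buf), "data_%d%s", index, data_file ? "_d" : "");
  return buf;
}

int main() {
  assert(BlockFileName(0, false) == "data_0");    // header / bitmap file
  assert(BlockFileName(0, true) == "data_0_d");   // block data, version 3 only
  assert(BlockFileName(3, true) == "data_3_d");
  return 0;
}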