Chromium Code Reviews

| Index: net/disk_cache/simple/simple_index.cc |
| diff --git a/net/disk_cache/simple/simple_index.cc b/net/disk_cache/simple/simple_index.cc |
| index 2a915bec6425ed80a78a94681c40f1436a390732..98939fc79ba9c862214adc004741c9cab3a1940c 100644 |
| --- a/net/disk_cache/simple/simple_index.cc |
| +++ b/net/disk_cache/simple/simple_index.cc |
| @@ -9,36 +9,65 @@ |
| #include "base/message_loop.h" |
|
Philippe
2013/04/15 13:57:57
Nit: base/logging.h is missing.
felipeg
2013/04/15 14:39:07
Done.
|
| #include "base/task_runner.h" |
| #include "base/threading/worker_pool.h" |
| -#include "net/base/io_buffer.h" |
| #include "net/base/net_errors.h" |
| -#include "net/disk_cache/simple/simple_disk_format.h" |
| -#include "third_party/zlib/zlib.h" |
| +#include "net/disk_cache/simple/simple_index_file.h" |
| +#include "net/disk_cache/simple/simple_index_util.h" |
| -namespace { |
| +namespace disk_cache { |
| + |
| +EntryMetadata::EntryMetadata() {} |
|
Philippe
2013/04/15 13:57:57
You need to have an initializer list here.
felipeg
2013/04/15 14:39:07
Done.
|
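The nit above asks for an explicit member initializer list. A minimal sketch of what that could look like, assuming the same three members used by the parameterized constructor below (the exact code that landed may differ):

    // Sketch only: default-construct with an explicit initializer list so the
    // POD members are not left uninitialized.
    EntryMetadata::EntryMetadata()
        : hash_key_(0),
          last_used_time_(0),
          entry_size_(0) {
    }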
| -const uint64 kMaxEntiresInIndex = 100000000; |
| +EntryMetadata::EntryMetadata(uint64 hash_key, |
| + base::Time last_used_time, |
|
Philippe
2013/04/15 13:57:57
Nit: indentation issue.
pasko-google - do not use
2013/04/15 14:23:55
align
felipeg
2013/04/15 14:39:07
Done.
|
| + uint64 entry_size) : |
| + hash_key_(hash_key), |
| + last_used_time_(last_used_time.ToInternalValue()), |
| + entry_size_(entry_size) {} |
|
Philippe
2013/04/15 13:57:57
Nit: the '}' should be on the line below in cases like this one.
felipeg
2013/04/15 14:39:07
Done.
|
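Taken together, the two formatting nits (parameter alignment and putting the '}' on its own line) point at roughly the following layout; this is only an illustration of the requested style, not necessarily the exact code that landed:

    // Sketch: parameters aligned with the first one, the initializer list on
    // continuation lines, and the empty body's closing '}' on its own line.
    EntryMetadata::EntryMetadata(uint64 hash_key,
                                 base::Time last_used_time,
                                 uint64 entry_size)
        : hash_key_(hash_key),
          last_used_time_(last_used_time.ToInternalValue()),
          entry_size_(entry_size) {
    }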
| -bool CheckHeader(disk_cache::SimpleIndexFile::Header header) { |
| - return header.number_of_entries <= kMaxEntiresInIndex && |
| - header.initial_magic_number == |
| - disk_cache::kSimpleIndexInitialMagicNumber && |
| - header.version == disk_cache::kSimpleVersion; |
| +uint64 EntryMetadata::GetHashKey() const { |
|
Philippe
2013/04/15 13:57:57
See my comment in the header.
felipeg
2013/04/15 14:39:07
Done.
|
| + return hash_key_; |
| } |
| -class FileAutoCloser { |
| - public: |
| - explicit FileAutoCloser(const base::PlatformFile& file) : file_(file) { } |
| - ~FileAutoCloser() { |
| - base::ClosePlatformFile(file_); |
| - } |
| - private: |
| - base::PlatformFile file_; |
| - DISALLOW_COPY_AND_ASSIGN(FileAutoCloser); |
| -}; |
| +base::Time EntryMetadata::GetLastUsedTime() const { |
| + return base::Time::FromInternalValue(last_used_time_); |
| +} |
| -} // namespace |
| +void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) { |
| + last_used_time_ = last_used_time.ToInternalValue(); |
| +} |
| -namespace disk_cache { |
| +void EntryMetadata::Serialize(Pickle* pickle) const { |
|
Philippe
2013/04/15 13:57:57
I would leverage the fact that you are using size-
felipeg
2013/04/15 14:39:07
Done.
|
| + DCHECK(pickle); |
| + pickle->WriteUInt64(hash_key_); |
| + pickle->WriteInt64(last_used_time_); |
| + pickle->WriteUInt64(entry_size_); |
| +} |
| + |
| +// static |
| +bool EntryMetadata::DeSerialize(PickleIterator* it, |
| + EntryMetadata* out) { |
|
Philippe
2013/04/15 13:57:57
Nit: lines should not be broken when they don't have to be.
felipeg
2013/04/15 14:39:07
Done.
|
| + DCHECK(it); |
| + DCHECK(out); |
| + return it->ReadUInt64(&(out->hash_key_)) && |
|
Philippe
2013/04/15 13:57:57
Nit: extra space after return.
Philippe
2013/04/15 13:57:57
Nit: extra parenthesis around out->hash_key_.
felipeg
2013/04/15 14:39:07
Done.
felipeg
2013/04/15 14:39:07
Done.
|
| + it->ReadInt64(&(out->last_used_time_)) && |
| + it->ReadUInt64(&(out->entry_size_)); |
| +} |
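As a usage sketch of the Serialize/DeSerialize pair above (a hypothetical caller, not part of this CL), an EntryMetadata round-trips through a Pickle roughly like this:

    // Hypothetical round-trip; |entry_metadata| is an existing EntryMetadata.
    Pickle pickle;
    entry_metadata.Serialize(&pickle);

    PickleIterator it(pickle);
    EntryMetadata restored;
    if (!EntryMetadata::DeSerialize(&it, &restored))
      LOG(ERROR) << "Corrupt index entry.";  // needs base/logging.h, per the nit above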
| + |
| +void EntryMetadata::MergeWith(const EntryMetadata& from) { |
| + DCHECK_EQ(hash_key_, from.hash_key_); |
| + if (last_used_time_ == 0) |
| + last_used_time_ = from.last_used_time_; |
| + if (entry_size_ == 0) |
| + entry_size_ = from.entry_size_; |
| +} |
| + |
| +void InsertInEntrySet(disk_cache::EntrySet* entry_set, |
| + const disk_cache::EntryMetadata& entry_metadata) { |
| + // TODO(felipeg): Use a hash_set instead of a hash_map. |
|
pasko-google - do not use
2013/04/15 14:23:55
I saw this TODO in another place, no need to repeat it.
felipeg
2013/04/15 15:37:04
Done.
|
| + DCHECK(entry_set); |
| + entry_set->insert( |
| + std::make_pair(entry_metadata.GetHashKey(), entry_metadata)); |
| +} |
| SimpleIndex::SimpleIndex( |
| const scoped_refptr<base::TaskRunner>& cache_thread, |
| @@ -57,105 +86,24 @@ SimpleIndex::~SimpleIndex() { |
| void SimpleIndex::Initialize() { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| - MergeCallback merge_callback = base::Bind(&SimpleIndex::MergeInitializingSet, |
| - this->AsWeakPtr()); |
| + IndexCompletionCallback merge_callback = |
| + base::Bind(&SimpleIndex::MergeInitializingSet, |
| + this->AsWeakPtr()); |
| base::WorkerPool::PostTask(FROM_HERE, |
| - base::Bind(&SimpleIndex::LoadFromDisk, |
| + base::Bind(&SimpleIndexFile::LoadFromDisk, |
| index_filename_, |
| io_thread_, |
| merge_callback), |
| true); |
| } |
| -// static |
| -void SimpleIndex::LoadFromDisk( |
| - const base::FilePath& index_filename, |
| - const scoped_refptr<base::TaskRunner>& io_thread, |
| - const MergeCallback& merge_callback) { |
| - // Open the index file. |
| - base::PlatformFileError error; |
| - base::PlatformFile index_file = base::CreatePlatformFile( |
| - index_filename, |
| - base::PLATFORM_FILE_OPEN_ALWAYS | |
| - base::PLATFORM_FILE_READ | |
| - base::PLATFORM_FILE_WRITE, |
| - NULL, |
| - &error); |
| - FileAutoCloser auto_close_index_file(index_file); |
| - if (error != base::PLATFORM_FILE_OK) { |
| - LOG(ERROR) << "Error opening file " << index_filename.value(); |
| - return RestoreFromDisk(index_filename, io_thread, merge_callback); |
| - } |
| - |
| - uLong incremental_crc = crc32(0L, Z_NULL, 0); |
| - int64 index_file_offset = 0; |
| - SimpleIndexFile::Header header; |
| - if (base::ReadPlatformFile(index_file, |
| - index_file_offset, |
| - reinterpret_cast<char*>(&header), |
| - sizeof(header)) != sizeof(header)) { |
| - return RestoreFromDisk(index_filename, io_thread, merge_callback); |
| - } |
| - index_file_offset += sizeof(header); |
| - incremental_crc = crc32(incremental_crc, |
| - reinterpret_cast<const Bytef*>(&header), |
| - implicit_cast<uInt>(sizeof(header))); |
| - |
| - if (!CheckHeader(header)) { |
| - LOG(ERROR) << "Invalid header on Simple Cache Index."; |
| - return RestoreFromDisk(index_filename, io_thread, merge_callback); |
| - } |
| - |
| - const int entries_buffer_size = |
| - header.number_of_entries * SimpleIndexFile::kEntryMetadataSize; |
| - |
| - scoped_ptr<char[]> entries_buffer(new char[entries_buffer_size]); |
| - if (base::ReadPlatformFile(index_file, |
| - index_file_offset, |
| - entries_buffer.get(), |
| - entries_buffer_size) != entries_buffer_size) { |
| - return RestoreFromDisk(index_filename, io_thread, merge_callback); |
| - } |
| - index_file_offset += entries_buffer_size; |
| - incremental_crc = crc32(incremental_crc, |
| - reinterpret_cast<const Bytef*>(entries_buffer.get()), |
| - implicit_cast<uInt>(entries_buffer_size)); |
| - |
| - SimpleIndexFile::Footer footer; |
| - if (base::ReadPlatformFile(index_file, |
| - index_file_offset, |
| - reinterpret_cast<char*>(&footer), |
| - sizeof(footer)) != sizeof(footer)) { |
| - return RestoreFromDisk(index_filename, io_thread, merge_callback); |
| - } |
| - const uint32 crc_read = footer.crc; |
| - const uint32 crc_calculated = incremental_crc; |
| - if (crc_read != crc_calculated) |
| - return RestoreFromDisk(index_filename, io_thread, merge_callback); |
| - |
| - scoped_ptr<EntrySet> index_file_entries(new EntrySet()); |
| - int entries_buffer_offset = 0; |
| - while(entries_buffer_offset < entries_buffer_size) { |
| - SimpleIndexFile::EntryMetadata entry_metadata; |
| - SimpleIndexFile::EntryMetadata::DeSerialize( |
| - &entries_buffer.get()[entries_buffer_offset], &entry_metadata); |
| - InsertInternal(index_file_entries.get(), entry_metadata); |
| - entries_buffer_offset += SimpleIndexFile::kEntryMetadataSize; |
| - } |
| - DCHECK_EQ(header.number_of_entries, index_file_entries->size()); |
| - |
| - io_thread->PostTask(FROM_HERE, |
| - base::Bind(merge_callback, |
| - base::Passed(&index_file_entries))); |
| -} |
| - |
| void SimpleIndex::Insert(const std::string& key) { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| // Upon insert we don't know yet the size of the entry. |
| // It will be updated later when the SimpleEntryImpl finishes opening or |
| // creating the new entry, and then UpdateEntrySize will be called. |
| const uint64 hash_key = GetEntryHashKey(key); |
| - InsertInternal(&entries_set_, SimpleIndexFile::EntryMetadata( |
| + InsertInEntrySet(&entries_set_, EntryMetadata( |
| hash_key, |
| base::Time::Now(), 0)); |
| if (!initialized_) |
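The comment in Insert() describes a two-step flow: the index records the entry with size 0 first, and the real size arrives later through UpdateEntrySize(). A hypothetical call sequence (not part of this CL) would be:

    // Hypothetical caller, e.g. somewhere in SimpleEntryImpl:
    index->Insert(key);                        // size recorded as 0 for now
    // ... the entry is opened or created and its size becomes known ...
    index->UpdateEntrySize(key, entry_size);   // updates the entry and cache_size_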
| @@ -197,85 +145,13 @@ bool SimpleIndex::UpdateEntrySize(const std::string& key, uint64 entry_size) { |
| return false; |
| // Update the total cache size with the new entry size. |
| - cache_size_ -= it->second.entry_size; |
| + cache_size_ -= it->second.GetEntrySize(); |
| cache_size_ += entry_size; |
| - it->second.entry_size = entry_size; |
| + it->second.SetEntrySize(entry_size); |
| return true; |
| } |
| -// static |
| -void SimpleIndex::InsertInternal( |
| - EntrySet* entry_set, |
| - const SimpleIndexFile::EntryMetadata& entry_metadata) { |
| - // TODO(felipeg): Use a hash_set instead of a hash_map. |
| - DCHECK(entry_set); |
| - entry_set->insert( |
| - std::make_pair(entry_metadata.GetHashKey(), entry_metadata)); |
| -} |
| - |
| -// static |
| -void SimpleIndex::RestoreFromDisk( |
| - const base::FilePath& index_filename, |
| - const scoped_refptr<base::TaskRunner>& io_thread, |
| - const MergeCallback& merge_callback) { |
| - using file_util::FileEnumerator; |
| - LOG(INFO) << "Simple Cache Index is being restored from disk."; |
| - |
| - file_util::Delete(index_filename, /* recursive = */ false); |
| - scoped_ptr<EntrySet> index_file_entries(new EntrySet()); |
| - |
| - // TODO(felipeg,gavinp): Fix this once we have a one-file per entry format. |
| - COMPILE_ASSERT(kSimpleEntryFileCount == 3, |
| - file_pattern_must_match_file_count); |
| - const base::FilePath::StringType file_pattern = FILE_PATH_LITERAL("*_[0-2]"); |
| - FileEnumerator enumerator(index_filename.DirName(), |
| - false /* recursive */, |
| - FileEnumerator::FILES, |
| - file_pattern); |
| - for (base::FilePath file_path = enumerator.Next(); !file_path.empty(); |
| - file_path = enumerator.Next()) { |
| - const base::FilePath::StringType base_name = file_path.BaseName().value(); |
| - // Converting to std::string is OK since we never use UTF8 wide chars in our |
| - // file names. |
| - const std::string hash_name(base_name.begin(), base_name.end()); |
| - const std::string hash_key_string = |
| - hash_name.substr(0, kEntryHashKeyAsHexStringSize); |
| - uint64 hash_key = 0; |
| - if (!GetEntryHashKeyFromHexString(hash_key_string, &hash_key)) { |
| - LOG(WARNING) << "Invalid Entry Hash Key filename while restoring " |
| - << "Simple Index from disk: " << hash_name; |
| - // TODO(felipeg): Should we delete the invalid file here ? |
| - continue; |
| - } |
| - |
| - FileEnumerator::FindInfo find_info = {}; |
| - enumerator.GetFindInfo(&find_info); |
| - base::Time last_used_time; |
| -#if defined(OS_POSIX) |
| - // For POSIX systems, a last access time is available. However, it's not |
| - // guaranteed to be more accurate than mtime. It is no worse though. |
| - last_used_time = base::Time::FromTimeT(find_info.stat.st_atime); |
| -#endif |
| - if (last_used_time.is_null()) |
| - last_used_time = FileEnumerator::GetLastModifiedTime(find_info); |
| - |
| - int64 file_size = FileEnumerator::GetFilesize(find_info); |
| - EntrySet::iterator it = index_file_entries->find(hash_key); |
| - if (it == index_file_entries->end()) { |
| - InsertInternal(index_file_entries.get(), SimpleIndexFile::EntryMetadata( |
| - hash_key, last_used_time, file_size)); |
| - } else { |
| - // Summing up the total size of the entry through all the *_[0-2] files |
| - it->second.entry_size += file_size; |
| - } |
| - } |
| - |
| - io_thread->PostTask(FROM_HERE, |
| - base::Bind(merge_callback, |
| - base::Passed(&index_file_entries))); |
| -} |
| - |
| void SimpleIndex::MergeInitializingSet( |
| scoped_ptr<EntrySet> index_file_entries) { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| @@ -296,79 +172,27 @@ void SimpleIndex::MergeInitializingSet( |
| EntrySet::iterator current_entry = entries_set_.find(it->first); |
| if (current_entry != entries_set_.end()) { |
| // When Merging, existing valid data in the |current_entry| will prevail. |
| - SimpleIndexFile::EntryMetadata::Merge( |
| - it->second, &(current_entry->second)); |
| - cache_size_ += current_entry->second.entry_size; |
| + current_entry->second.MergeWith(it->second); |
| + cache_size_ += current_entry->second.GetEntrySize(); |
| } else { |
| - InsertInternal(&entries_set_, it->second); |
| - cache_size_ += it->second.entry_size; |
| + InsertInEntrySet(&entries_set_, it->second); |
| + cache_size_ += it->second.GetEntrySize(); |
| } |
| } |
| initialized_ = true; |
| } |
| -void SimpleIndex::Serialize(std::string* out_buffer) { |
| - DCHECK(io_thread_checker_.CalledOnValidThread()); |
| - DCHECK(out_buffer); |
| - SimpleIndexFile::Header header; |
| - SimpleIndexFile::Footer footer; |
| - |
| - header.initial_magic_number = kSimpleIndexInitialMagicNumber; |
| - header.version = kSimpleVersion; |
| - header.number_of_entries = entries_set_.size(); |
| - |
| - out_buffer->reserve( |
| - sizeof(header) + |
| - sizeof(SimpleIndexFile::EntryMetadata) * entries_set_.size() + |
| - sizeof(footer)); |
| - |
| - // The Header goes first. |
| - out_buffer->append(reinterpret_cast<const char*>(&header), |
| - sizeof(header)); |
| - |
| - // Then all the entries from |entries_set_|. |
| - for (EntrySet::const_iterator it = entries_set_.begin(); |
| - it != entries_set_.end(); ++it) { |
| - SimpleIndexFile::EntryMetadata::Serialize(it->second, out_buffer); |
| - } |
| - |
| - // Then, CRC. |
| - footer.crc = crc32(crc32(0, Z_NULL, 0), |
| - reinterpret_cast<const Bytef*>(out_buffer->data()), |
| - implicit_cast<uInt>(out_buffer->size())); |
| - |
| - out_buffer->append(reinterpret_cast<const char*>(&footer), sizeof(footer)); |
| -} |
| - |
| void SimpleIndex::WriteToDisk() { |
| DCHECK(io_thread_checker_.CalledOnValidThread()); |
| - scoped_ptr<std::string> buffer(new std::string()); |
| - Serialize(buffer.get()); |
| + SimpleIndexFile::IndexMetadata index_metadata(entries_set_.size(), |
| + cache_size_); |
| + scoped_ptr<Pickle> pickle = SimpleIndexFile::Serialize(index_metadata, |
| + entries_set_); |
| cache_thread_->PostTask(FROM_HERE, base::Bind( |
| - &SimpleIndex::UpdateFile, |
| + &SimpleIndexFile::WriteToDisk, |
| index_filename_, |
| - index_filename_.DirName().AppendASCII("index_temp"), |
| - base::Passed(&buffer))); |
| -} |
| - |
| -// static |
| -void SimpleIndex::UpdateFile(const base::FilePath& index_filename, |
| - const base::FilePath& temp_filename, |
| - scoped_ptr<std::string> buffer) { |
| - int bytes_written = file_util::WriteFile( |
| - temp_filename, buffer->data(), buffer->size()); |
| - DCHECK_EQ(bytes_written, implicit_cast<int>(buffer->size())); |
| - if (bytes_written != static_cast<int>(buffer->size())) { |
| - // TODO(felipeg): Add better error handling. |
| - LOG(ERROR) << "Could not write Simple Cache index to temporary file: " |
| - << temp_filename.value(); |
| - file_util::Delete(temp_filename, /* recursive = */ false); |
| - return; |
| - } |
| - // Swap temp and index_file. |
| - bool result = file_util::ReplaceFile(temp_filename, index_filename); |
| - DCHECK(result); |
| + base::Passed(&pickle))); |
| } |
| } // namespace disk_cache |