Index: net/disk_cache/v3/backend_worker.cc |
=================================================================== |
--- net/disk_cache/v3/backend_worker.cc (revision 0) |
+++ net/disk_cache/v3/backend_worker.cc (revision 0) |
@@ -0,0 +1,731 @@ |
+// Copyright (c) 2012 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "net/disk_cache/v3/backend_worker.h" |
+ |
+#include "base/bind.h" |
+#include "base/file_util.h" |
+#include "base/message_loop.h" |
+#include "base/stringprintf.h" |
+#include "net/base/net_errors.h" |
+#include "net/disk_cache/cache_util.h" |
+#include "net/disk_cache/errors.h" |
+#include "net/disk_cache/experiments.h" |
+#include "net/disk_cache/mapped_file.h" |
+#include "net/disk_cache/v3/backend_work_item.h" |
+#include "net/disk_cache/v3/disk_format_v3.h" |
+ |
+using base::Time; |
+using base::TimeDelta; |
+using base::TimeTicks; |
+ |
+namespace { |
+ |
+const char kIndexName[] = "index"; |
+const char kIndexBackupName[] = "index_bak"; |
+const char kTable1Name[] = "index_tb1"; |
+const char kTable2Name[] = "index_tb2"; |
+const char kTable2TempName[] = "index_tb2_tmp"; |
+const int kMaxOldFolders = 100; |
+ |
+// Seems like ~240 MB correspond to less than 50k entries for 99% of the people. |
+// Note that the actual target is to keep the index table load factor under 55% |
+// for most users. |
+const int k64kEntriesStore = 240 * 1000 * 1000; |
+const int kBaseTableLen = 64 * 1024; |
+const int kDefaultCacheSize = 80 * 1024 * 1024; |
+ |
+// Avoid trimming the cache for the first 5 minutes (10 timer ticks). |
+const int kTrimDelay = 10; |
+ |
+// Returns the index table length (in cells) appropriate for a cache of |
+// |storage_size| bytes. The table doubles at each storage threshold so the |
+// load factor stays under the target described above. |
+int DesiredIndexTableLen(int32 storage_size) { |
+  if (storage_size <= k64kEntriesStore) |
+    return kBaseTableLen; |
+  if (storage_size <= k64kEntriesStore * 2) |
+    return kBaseTableLen * 2; |
+  if (storage_size <= k64kEntriesStore * 4) |
+    return kBaseTableLen * 4; |
+  if (storage_size <= k64kEntriesStore * 8) |
+    return kBaseTableLen * 8; |
+ |
+  // The biggest storage_size for int32 requires a 4 MB table. |
+  return kBaseTableLen * 16; |
+} |
+ |
+// Inverse of DesiredIndexTableLen(): the largest cache size (in bytes) that |
+// a table of |table_len| cells is expected to serve. |
+int MaxStorageSizeForTable(int table_len) { |
+  return table_len * (k64kEntriesStore / kBaseTableLen); |
+} |
+ |
+// Returns the total size in bytes of an index file holding |table_len| |
+// cells: the header followed by the hash table. The previous stub always |
+// returned 0 (the real computation was commented out), which would make any |
+// caller sizing the index file truncate it; restore the computation. |
+size_t GetIndexSize(int table_len) { |
+  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len; |
+  return sizeof(disk_cache::IndexHeaderV3) + table_size; |
+} |
+ |
+// Returns the size in bytes of the index bitmap file for |table_len| cells. |
+// The base IndexBitmap structure covers kBaseBitmapBytes worth of bits; any |
+// extra bits are allocated in whole 4 KB pages. |
+size_t GetIndexBitmapSize(int table_len) { |
+  DCHECK_LT(table_len, 1 << 22); |
+  size_t base_bits = disk_cache::kBaseBitmapBytes * 8; |
+  if (table_len < static_cast<int>(base_bits)) |
+    return sizeof(disk_cache::IndexBitmap); |
+ |
+  // One bit per cell -> table_len / 8 bytes needed in total. |
+  size_t extra_pages = (table_len / 8) - disk_cache::kBaseBitmapBytes; |
+  extra_pages = (extra_pages + 4095) / 4096;  // Round up to whole pages. |
+  return sizeof(disk_cache::IndexBitmap) + extra_pages * 4096; |
+} |
+ |
+// ------------------------------------------------------------------------ |
+ |
+// Sets group for the current experiment. Returns false if the files should be |
+// discarded. |
+bool InitExperiment(disk_cache::IndexHeaderV3* header) { |
+  // No experiment is currently defined; clear any stale group marker. |
+  header->experiment = disk_cache::NO_EXPERIMENT; |
+  return true; |
+} |
+ |
+} // namespace |
+ |
+// ------------------------------------------------------------------------ |
+ |
+namespace disk_cache { |
+ |
+// Creates a worker that performs the blocking file work for a backend living |
+// on |main_thread|. All real setup is deferred to Init(). |
+BackendImplV3::Worker::Worker(const base::FilePath& path, |
+                              base::MessageLoopProxy* main_thread) |
+    : path_(path), |
+      main_thread_(main_thread), |
+      cleanup_work_item_(NULL), |
+      init_(false), |
+      doubling_index_(false), |
+      user_flags_(0) { |
+} |
+ |
+// Opens or creates the backing store, maps the index files, initializes the |
+// block files and the stats, and fills |result| with everything the main |
+// thread needs to build its in-memory view of the index. Returns |
+// ERR_NO_ERROR, ERR_CACHE_CREATED (new or empty cache), ERR_PREVIOUS_CRASH |
+// (dirty shutdown detected) or a fatal error code. |
+int BackendImplV3::Worker::Init(uint32 flags, scoped_ptr<InitResult>* result) { |
+  DCHECK(!init_); |
+  if (init_) |
+    return ERR_INIT_FAILED; |
+ |
+  user_flags_ = flags; |
+  result->reset(new InitResult); |
+ |
+  bool create_files = false; |
+  if (!InitBackingStore(&create_files)) |
+    return ERR_STORAGE_ERROR; |
+ |
+  init_ = true; |
+  if (!LoadIndex(result->get())) |
+    return ERR_INIT_FAILED; |
+ |
+  int rv = ERR_NO_ERROR; |
+  IndexHeaderV3* index = |
+      reinterpret_cast<IndexHeaderV3*>(index_header_->buffer()); |
+  // An existing cache with zero entries is treated like a brand new one. |
+  if (create_files || !index->num_entries) |
+    rv = ERR_CACHE_CREATED; |
+ |
+  if (create_files && (flags & EVICTION_V2)) { |
+    index->flags |= CACHE_EVICTION_2; |
+  } |
+ |
+  if (!(flags & BASIC_UNIT_TEST) && !InitExperiment(index)) |
+    return ERR_INIT_FAILED; |
+ |
+  // A nonzero crash marker means the previous session never ran Cleanup(). |
+  if (index->crash != 0) |
+    rv = ERR_PREVIOUS_CRASH; |
+  // Keep the marker set while we are alive; Cleanup() clears it again. |
+  index->crash = 1; |
+ |
+  block_files_.reset(new BlockFiles(path_)); |
+  if (flags & BASIC_UNIT_TEST) |
+    block_files_->UseSmallSizeIncrementsForTest(); |
+ |
+  if (!block_files_->Init(create_files, kFirstAdditionalBlockFileV3)) |
+    return ERR_INIT_FAILED; |
+ |
+  block_files_->GetBitmaps(index->max_block_file, |
+                           &result->get()->block_bitmaps); |
+  index->max_block_file = static_cast<int>(result->get()->block_bitmaps.size()); |
+ |
+  if (!InitStats(index, result->get())) |
+    return ERR_INIT_FAILED; |
+ |
+#if defined(STRESS_CACHE_EXTENDED_VALIDATION) |
+  trace_object_->EnableTracing(false); |
+  int sc = SelfCheck(); |
+  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH) |
+    NOTREACHED(); |
+  trace_object_->EnableTracing(true); |
+#endif |
+ |
+  return rv; |
+} |
+ |
+// Discards the whole cache on disk and re-runs Init(); used to recover from |
+// a corrupt or otherwise unusable backing store. |
+int BackendImplV3::Worker::Restart(uint32 flags, |
+                                   scoped_ptr<InitResult>* result) { |
+  Trace("Worker::Restart"); |
+  if (init_) { |
+    init_ = false; |
+  } |
+ |
+  CloseFiles(); |
+  DeleteCache(path_, false); |
+ |
+  return Init(flags, result); |
+} |
+ |
+// Grows the index table. This runs in two phases driven by work items: the |
+// first call extends the files on disk and maps the bigger versions into the |
+// big_*_ members (returning the new mappings through |result|); a later call |
+// with WorkItem::WORK_COMPLETE swaps those mappings in as the current ones. |
+// Crossing a multiple of kBaseTableLen doubles the main table instead, which |
+// additionally keeps big_extra_temp_table_ alive until GrowDone(). |
+int BackendImplV3::Worker::GrowIndex(uint32 flags, |
+                                     scoped_ptr<InitResult>* result) { |
+  Trace("Worker::GrowIndex, flags 0x%x", flags); |
+  if (!init_) |
+    return ERR_OPERATION_FAILED; |
+ |
+  if (flags & WorkItem::WORK_COMPLETE) { |
+    // Phase two: adopt the bigger mappings created by a previous call. |
+    index_header_ = big_index_header_; |
+    big_index_header_ = NULL; |
+    if (big_main_table_) { |
+      main_table_ = big_main_table_; |
+      big_main_table_ = NULL; |
+    } |
+    if (!big_extra_temp_table_) |
+      extra_table_ = big_extra_table_; |
+    big_extra_table_ = NULL; |
+ |
+    // If the index takes time to move the cells, it creates a new work item to |
+    // notify completion, which executes this code. |
+    if (big_extra_temp_table_) |
+      return GrowDone(); |
+ |
+    return ERR_NO_ERROR; |
+  } |
+ |
+  IndexHeaderV3* header = |
+      reinterpret_cast<IndexHeaderV3*>(index_header_->buffer()); |
+ |
+  // table_len = main table (multiple of kBaseTableLen) + extra table. |
+  int current_main_len = header->table_len / kBaseTableLen * kBaseTableLen; |
+  int step_size = std::min(8192, current_main_len / 8); |
+  if (user_flags_ & BASIC_UNIT_TEST) |
+    step_size = 8; |
+  if ((user_flags_ & UNIT_TEST_MODE) && !doubling_index_) |
+    step_size = (header->table_len * 3 / 2) & 0x7ffffff0; |
+  int new_len = header->table_len + step_size; |
+ |
+  bool double_index = false; |
+  if (!doubling_index_) { |
+    DCHECK(!big_extra_table_); |
+    DCHECK(!big_main_table_); |
+    // Double when this growth step would cross a kBaseTableLen boundary. |
+    double_index = (new_len / kBaseTableLen != |
+                    header->table_len / kBaseTableLen); |
+  } |
+ |
+  int extra_len = new_len - kBaseTableLen; |
+  if (double_index) { |
+    // We double the table when the extra table is about to reach the size of |
+    // the main table. That means that right after this, the new extra table |
+    // should be between 19% and 23% of the main table so we start with 25%. |
+    extra_len = std::min(8192, current_main_len / 4); |
+    extra_len = (user_flags_ & BASIC_UNIT_TEST) ? 128 : extra_len; |
+    int main_len = (header->table_len / kBaseTableLen + 1) * kBaseTableLen; |
+    new_len = main_len + extra_len; |
+ |
+    if (!CreateExtraTable(extra_len * kBytesPerCell)) |
+      return ERR_OPERATION_FAILED; |
+ |
+    if (!main_table_->SetLength(main_len * kBytesPerCell)) |
+      return ERR_OPERATION_FAILED; |
+  } else if (doubling_index_) { |
+    // A slow-grow request while a doubling is still in flight: extend the |
+    // temporary extra table instead of the one being retired. |
+    if (!big_extra_temp_table_->SetLength(extra_len * kBytesPerCell)) |
+      return ERR_OPERATION_FAILED; |
+  } else { |
+    if (!extra_table_->SetLength(extra_len * kBytesPerCell)) |
+      return ERR_OPERATION_FAILED; |
+  } |
+ |
+  if (!index_header_->SetLength(GetIndexBitmapSize(new_len))) |
+    return ERR_OPERATION_FAILED; |
+ |
+  scoped_refptr<MappedFile> big_index_header = new MappedFile(); |
+  if (!big_index_header->Init(path_.AppendASCII(kIndexName), 0)) { |
+    LOG(ERROR) << "Unable to remap index"; |
+    return ERR_OPERATION_FAILED; |
+  } |
+ |
+  scoped_refptr<MappedFile> big_extra_table = new MappedFile(); |
+  const char* extra_name = (double_index || doubling_index_) ? kTable2TempName : |
+                                                               kTable2Name; |
+  if (!big_extra_table->Init(path_.AppendASCII(extra_name), 0)) { |
+    LOG(ERROR) << "Unable to remap index_tb2"; |
+    return ERR_OPERATION_FAILED; |
+  } |
+ |
+  if (double_index) { |
+    scoped_refptr<MappedFile> big_main_table = new MappedFile(); |
+    if (!big_main_table->Init(path_.AppendASCII(kTable1Name), 0)) { |
+      LOG(ERROR) << "Unable to remap index_tb1"; |
+      return ERR_OPERATION_FAILED; |
+    } |
+    big_main_table_.swap(big_main_table); |
+ |
+    // Grab an extra reference to the new extra table that can be used for an |
+    // extended period, while the index is being rebuilt. The normal reference |
+    // (big_extra_table_) will be released when the work item is completed, but |
+    // that doesn't mean the index is done with it. |
+    // Note that we are able to process slow grow requests even when the index |
+    // is being doubled. |
+    big_extra_temp_table_ = big_extra_table; |
+  } |
+  big_index_header_.swap(big_index_header); |
+  big_extra_table_.swap(big_extra_table); |
+ |
+  header = reinterpret_cast<IndexHeaderV3*>(big_index_header_->buffer()); |
+  header->table_len = new_len; |
+ |
+  result->reset(new InitResult); |
+  result->get()->index_data.main_table = NULL; |
+ |
+  result->get()->index_data.index_bitmap = |
+      reinterpret_cast<IndexBitmap*>(big_index_header_->buffer()); |
+  result->get()->index_data.extra_table = |
+      reinterpret_cast<IndexBucket*>(big_extra_table_->buffer()); |
+ |
+  if (double_index) { |
+    result->get()->index_data.main_table = |
+        reinterpret_cast<IndexBucket*>(big_main_table_->buffer()); |
+    doubling_index_ = true; |
+  } |
+ |
+  return ERR_NO_ERROR; |
+} |
+ |
+// Grows the block files. Like GrowIndex(), this is two-phased: the first |
+// call builds a second BlockFiles instance over the grown files and returns |
+// its bitmaps through |result|; the WORK_COMPLETE call swaps it in as the |
+// current one. |
+int BackendImplV3::Worker::GrowFiles(uint32 flags, |
+                                     scoped_ptr<InitResult>* result) { |
+  Trace("Worker::GrowFiles, flags 0x%x", flags); |
+  if (!init_) |
+    return ERR_OPERATION_FAILED; |
+ |
+  if (flags & WorkItem::WORK_COMPLETE) { |
+    block_files_.reset(); |
+    block_files_.swap(big_block_files_); |
+    return ERR_NO_ERROR; |
+  } |
+ |
+  big_block_files_.reset(new BlockFiles(path_)); |
+  if (user_flags_ & BASIC_UNIT_TEST) |
+    big_block_files_->UseSmallSizeIncrementsForTest(); |
+ |
+  if (!big_block_files_->Init(false, kFirstAdditionalBlockFileV3)) |
+    return ERR_INIT_FAILED; |
+ |
+  IndexHeaderV3* index = |
+      reinterpret_cast<IndexHeaderV3*>(index_header_->buffer()); |
+ |
+  result->reset(new InitResult); |
+  big_block_files_->GetBitmaps(index->max_block_file, |
+                               &result->get()->block_bitmaps); |
+  index->max_block_file = static_cast<int>(result->get()->block_bitmaps.size()); |
+  return ERR_NO_ERROR; |
+} |
+ |
+// Deletes the external file backing |address|. Data stored inside a block |
+// file cannot be deleted through this path. |
+int BackendImplV3::Worker::Delete(Addr address) { |
+  if (address.is_block_file()) |
+    return ERR_OPERATION_FAILED; |
+ |
+  return DeleteCacheFile(GetFileName(address)) ? ERR_NO_ERROR |
+                                               : ERR_OPERATION_FAILED; |
+} |
+ |
+// Drops our reference to the external file backing |address|, if we have it |
+// open. Block-file addresses are not tracked here. |
+int BackendImplV3::Worker::Close(Addr address) { |
+  if (address.is_block_file()) |
+    return ERR_OPERATION_FAILED; |
+ |
+  files_.erase(address.value()); |
+ |
+  return ERR_NO_ERROR; |
+} |
+ |
+// Entry point for work items posted to this thread. Cleanup requests are |
+// handled directly; everything else is dispatched through the item itself. |
+void BackendImplV3::Worker::OnDoWork(WorkItem* work_item) { |
+  if (work_item->type() == WorkItem::WORK_CLEANUP) |
+    return Cleanup(work_item); |
+ |
+  work_item->Start(this); |
+} |
+ |
+// Returns a finished work item to the main thread for completion. |
+void BackendImplV3::Worker::DoneWithItem(WorkItem* work_item) { |
+  bool rv = main_thread_->PostTask(FROM_HERE, |
+                                   base::Bind(&WorkItem::OnDone, work_item)); |
+  DCHECK(rv); |
+} |
+ |
+// Returns the file that stores the data at |address|: a dedicated external |
+// file for separate-file addresses, or the shared block file otherwise. |
+File* BackendImplV3::Worker::GetBackingFile(Addr address, bool for_write) { |
+  return address.is_separate_file() ? GetExternalFile(address, for_write) |
+                                    : block_files_->GetFile(address); |
+} |
+ |
+// Opens (creating if needed) the index backup file without mapping it, and |
+// keeps a reference until CloseBackupIndexFile(). Returns NULL on failure. |
+File* BackendImplV3::Worker::GetBackupIndexFile() { |
+  DCHECK(!index_backup_.get()); |
+  index_backup_ = new MappedFile(); |
+  index_backup_->set_force_creation(); |
+  if (!index_backup_->InitNoMap(path_.AppendASCII(kIndexBackupName))) { |
+    LOG(ERROR) << "Unable to open index_bak"; |
+    return NULL; |
+  } |
+  return index_backup_.get(); |
+} |
+ |
+// Releases the reference taken by GetBackupIndexFile(). |
+void BackendImplV3::Worker::CloseBackupIndexFile() { |
+  index_backup_ = NULL; |
+} |
+ |
+// True once Init() has succeeded and until Cleanup()/Restart() tears down. |
+bool BackendImplV3::Worker::IsValid() { |
+  return init_; |
+} |
+ |
+// ------------------------------------------------------------------------ |
+ |
+BackendImplV3::Worker::~Worker() { |
+  // If Cleanup() kept a work item alive for its user callback, complete it |
+  // on the main thread now. |
+  if (cleanup_work_item_) |
+    main_thread_->PostTask(FROM_HERE, |
+                           base::Bind(&WorkItem::OnDone, cleanup_work_item_)); |
+} |
+ |
+// Shuts the worker down: clears the crash marker (marking a clean shutdown), |
+// closes every file and leaves the object uninitialized. If the work item |
+// carries a user callback it is kept until the destructor; otherwise it is |
+// released here and never returned to the main thread. |
+void BackendImplV3::Worker::Cleanup(WorkItem* work_item) { |
+  Trace("Worker::Cleanup"); |
+  if (!work_item->user_callback().is_null()) |
+    cleanup_work_item_ = work_item; |
+ |
+  if (init_) { |
+    IndexHeaderV3* index = |
+        reinterpret_cast<IndexHeaderV3*>(index_header_->buffer()); |
+    index->crash = 0; |
+  } |
+ |
+  CloseFiles(); |
+  init_ = false; |
+ |
+  if (work_item->user_callback().is_null()) { |
+    // This is the only message we don't return to the main thread, we are done |
+    // with the work item for good. |
+    work_item->Release(); |
+  } |
+} |
+ |
+// Releases every mapped file and open handle, including any in-flight |
+// "big" versions created while growing the index or the block files. |
+void BackendImplV3::Worker::CloseFiles() { |
+  index_header_ = NULL; |
+  main_table_ = NULL; |
+  extra_table_ = NULL; |
+  index_backup_ = NULL; |
+  block_files_->CloseFiles(); |
+  files_.clear(); |
+ |
+  big_index_header_ = NULL; |
+  big_main_table_ = NULL; |
+  big_extra_table_ = NULL; |
+  big_extra_temp_table_ = NULL; |
+  if (big_block_files_.get()) |
+    big_block_files_->CloseFiles(); |
+} |
+ |
+// Returns the external file for |address|, opening and caching it in |files_| |
+// on first use. When |for_write| the file is created if missing. Returns |
+// NULL if the file cannot be opened. |
+File* BackendImplV3::Worker::GetExternalFile(Addr address, bool for_write) { |
+  FilesMap::iterator it = files_.find(address.value()); |
+  if (it != files_.end()) |
+    return it->second; |
+ |
+  scoped_refptr<disk_cache::File> file(new disk_cache::File(false)); |
+  if (for_write) |
+    file->set_force_creation(); |
+  if (file->Init(GetFileName(address))) |
+    files_[address.value()] = file.get(); |
+  else |
+    file = NULL; |
+ |
+  return file; |
+} |
+ |
+// Builds the on-disk name ("f_xxxxxx") for a separate-file address. |
+base::FilePath BackendImplV3::Worker::GetFileName(Addr address) const { |
+  if (!address.is_separate_file() || !address.is_initialized()) { |
+    NOTREACHED(); |
+    return base::FilePath(); |
+  } |
+ |
+  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber()); |
+  return path_.AppendASCII(tmp); |
+} |
+ |
+// We just created a new file so we're going to write the header and set the |
+// file length to include the hash table (zero filled). |
+// Writes a fresh header into the (just created) index |file|, sizes it to |
+// hold the bitmap, and creates the backup file plus the two table files. |
+// All files are opened with EXCLUSIVE_WRITE so other processes cannot |
+// corrupt a cache that is being created. |
+bool BackendImplV3::Worker::CreateBackingStore(disk_cache::File* file) { |
+  IndexHeaderV3 header; |
+  memset(&header, 0, sizeof(header)); |
+  header.magic = kIndexMagicV3; |
+  header.version = kVersion3; |
+  header.max_block_file = kFirstAdditionalBlockFileV3; |
+ |
+  // Start with 12.5% of the size of the main table. |
+  int extra_len = (user_flags_ & BASIC_UNIT_TEST) ? 8 : kBaseTableLen / 8; |
+  header.table_len = kBaseTableLen + extra_len; |
+  header.max_bucket = kBaseTableLen / 4 - 1; |
+  header.flags = SMALL_CACHE; |
+ |
+  // Backdate base_time so newly added entries get positive relative times. |
+  header.create_time = Time::Now().ToInternalValue(); |
+  header.base_time = (Time::Now() - TimeDelta::FromDays(20)).ToInternalValue(); |
+ |
+  if (!file->Write(&header, sizeof(header), 0)) |
+    return false; |
+ |
+  if (!file->SetLength(GetIndexBitmapSize(header.table_len))) |
+    return false; |
+ |
+  int flags = base::PLATFORM_FILE_READ | |
+              base::PLATFORM_FILE_WRITE | |
+              base::PLATFORM_FILE_CREATE | |
+              base::PLATFORM_FILE_EXCLUSIVE_WRITE; |
+ |
+  // The backup file starts as an exact copy of the index header. |
+  base::FilePath name = path_.AppendASCII(kIndexBackupName); |
+  scoped_refptr<disk_cache::File> file2(new disk_cache::File( |
+      base::CreatePlatformFile(name, flags, NULL, NULL))); |
+ |
+  if (!file2->IsValid()) |
+    return false; |
+ |
+  if (!file2->Write(&header, sizeof(header), 0)) |
+    return false; |
+ |
+  if (!file2->SetLength(GetIndexBitmapSize(header.table_len))) |
+    return false; |
+ |
+  // The main and extra tables hold only cells, so they just need sizing. |
+  name = path_.AppendASCII(kTable1Name); |
+  file2 = new disk_cache::File(base::CreatePlatformFile(name, flags, NULL, |
+                                                        NULL)); |
+  if (!file2->IsValid()) |
+    return false; |
+ |
+  if (!file2->SetLength(kBaseTableLen * kBytesPerCell)) |
+    return false; |
+ |
+  name = path_.AppendASCII(kTable2Name); |
+  file2 = new disk_cache::File(base::CreatePlatformFile(name, flags, NULL, |
+                                                        NULL)); |
+  if (!file2->IsValid()) |
+    return false; |
+ |
+  if (!file2->SetLength(extra_len * kBytesPerCell)) |
+    return false; |
+ |
+  return true; |
+} |
+ |
+// Creates the temporary extra-table file used while doubling the index and |
+// sizes it to hold |extra_len| cells. Returns false on any file error. |
+bool BackendImplV3::Worker::CreateExtraTable(int extra_len) { |
+  int flags = base::PLATFORM_FILE_READ | |
+              base::PLATFORM_FILE_WRITE | |
+              base::PLATFORM_FILE_CREATE | |
+              base::PLATFORM_FILE_EXCLUSIVE_WRITE; |
+ |
+  base::FilePath name = path_.AppendASCII(kTable2TempName); |
+  scoped_refptr<disk_cache::File> file(new disk_cache::File( |
+      base::CreatePlatformFile(name, flags, NULL, NULL))); |
+  if (!file->IsValid()) |
+    return false; |
+ |
+  if (!file->SetLength(extra_len * kBytesPerCell)) |
+    return false; |
+ |
+  return true; |
+} |
+ |
+// Creates the cache directory if needed, opens or creates the index file |
+// (setting |*file_created| accordingly, and writing the initial contents of |
+// all files when new), then memory-maps the index, both tables and the |
+// backup. Returns false on any failure. |
+bool BackendImplV3::Worker::InitBackingStore(bool* file_created) { |
+  if (!file_util::CreateDirectory(path_)) |
+    return false; |
+ |
+  base::FilePath index_name = path_.AppendASCII(kIndexName); |
+ |
+  int flags = base::PLATFORM_FILE_READ | |
+              base::PLATFORM_FILE_WRITE | |
+              base::PLATFORM_FILE_OPEN_ALWAYS | |
+              base::PLATFORM_FILE_EXCLUSIVE_WRITE; |
+  scoped_refptr<disk_cache::File> file(new disk_cache::File( |
+      base::CreatePlatformFile(index_name, flags, file_created, NULL))); |
+ |
+  if (!file->IsValid()) |
+    return false; |
+ |
+  bool ret = true; |
+  if (*file_created) |
+    ret = CreateBackingStore(file); |
+ |
+  // Close the plain handle before mapping the file below. |
+  file = NULL; |
+  if (!ret) |
+    return false; |
+ |
+  index_header_ = new MappedFile(); |
+  if (!index_header_->Init(index_name, 0)) { |
+    LOG(ERROR) << "Unable to map index"; |
+    return false; |
+  } |
+ |
+  if (index_header_->GetLength() < sizeof(IndexBitmap)) { |
+    // We verify this again on CheckIndex() but it's easier to make sure now |
+    // that the header is there. |
+    LOG(ERROR) << "Corrupt index file"; |
+    return false; |
+  } |
+ |
+  main_table_ = new MappedFile(); |
+  if (!main_table_->Init(path_.AppendASCII(kTable1Name), 0)) { |
+    LOG(ERROR) << "Unable to map index_tb1"; |
+    return false; |
+  } |
+ |
+  extra_table_ = new MappedFile(); |
+  if (!extra_table_->Init(path_.AppendASCII(kTable2Name), 0)) { |
+    LOG(ERROR) << "Unable to map index_tb2"; |
+    return false; |
+  } |
+ |
+  index_backup_ = new MappedFile(); |
+  if (!index_backup_->Init(path_.AppendASCII(kIndexBackupName), 0)) { |
+    LOG(ERROR) << "Unable to map index_bak"; |
+    return false; |
+  } |
+ |
+  return true; |
+} |
+ |
+// Validates the mapped index and backup files, exposes the mapped tables |
+// through |init_result|, and copies the backup header and bitmap into heap |
+// buffers so the backup file itself can be closed. Returns false if any |
+// file looks corrupt or truncated. |
+bool BackendImplV3::Worker::LoadIndex(InitResult* init_result) { |
+  init_result->index_data.index_bitmap = |
+      reinterpret_cast<IndexBitmap*>(index_header_->buffer()); |
+  init_result->index_data.main_table = |
+      reinterpret_cast<IndexBucket*>(main_table_->buffer()); |
+  init_result->index_data.extra_table = |
+      reinterpret_cast<IndexBucket*>(extra_table_->buffer()); |
+ |
+  if (!CheckIndexFile(index_header_)) |
+    return false; |
+ |
+  if (!CheckIndexFile(index_backup_)) |
+    return false; |
+ |
+  IndexHeaderV3& header = init_result->index_data.index_bitmap->header; |
+ |
+  // NOTE(review): main_table_len is derived from kBaseTableLen rather than |
+  // from header.table_len, so for a doubled index this minimum-length check |
+  // appears weaker than intended -- confirm whether it should instead be |
+  // (header.table_len - extra_table_len) * kBytesPerCell. |
+  size_t extra_table_len = header.table_len % kBaseTableLen; |
+  size_t main_table_len = (kBaseTableLen - extra_table_len) * kBytesPerCell; |
+  extra_table_len *= kBytesPerCell; |
+ |
+  if (main_table_->GetLength() < main_table_len || |
+      extra_table_->GetLength() < extra_table_len) { |
+    LOG(ERROR) << "Truncated table"; |
+    return false; |
+  } |
+ |
+  IndexBitmap* index = reinterpret_cast<IndexBitmap*>(index_backup_->buffer()); |
+ |
+  init_result->index_data.backup_header.reset(new IndexHeaderV3); |
+  memcpy(init_result->index_data.backup_header.get(), &index->header, |
+         sizeof(index->header)); |
+ |
+  size_t bitmap_len = GetIndexBitmapSize(index->header.table_len) - |
+                      sizeof(index->header); |
+  init_result->index_data.backup_bitmap.reset(new uint32[bitmap_len / 4]); |
+  memcpy(init_result->index_data.backup_bitmap.get(), &index->bitmap, |
+         bitmap_len); |
+ |
+  // Close the backup. |
+  index_backup_ = NULL; |
+  return true; |
+} |
+ |
+// Sanity-checks a mapped index (or index backup) file: magic, version, |
+// table length, file size and basic header counters. Returns false if the |
+// file should be considered corrupt. |
+bool BackendImplV3::Worker::CheckIndexFile(MappedFile* file) { |
+  size_t current_size = file->GetLength(); |
+  if (current_size < sizeof(IndexBitmap)) { |
+    LOG(ERROR) << "Corrupt Index file"; |
+    return false; |
+  } |
+ |
+  IndexHeaderV3* header = reinterpret_cast<IndexHeaderV3*>(file->buffer()); |
+ |
+  if (kIndexMagicV3 != header->magic || kVersion3 != header->version) { |
+    LOG(ERROR) << "Invalid file version or magic"; |
+    return false; |
+  } |
+ |
+  if (header->table_len <= 0 || header->table_len > 1 << 22) { |
+    LOG(ERROR) << "Invalid table size"; |
+    return false; |
+  } |
+ |
+  // The table always grows in fixed increments, so a valid length must be |
+  // aligned to that granularity (much smaller for unit tests). |
+  int min_mask = (user_flags_ & BASIC_UNIT_TEST) ? 0x3 : 0xff; |
+  if (current_size < GetIndexBitmapSize(header->table_len) || |
+      header->table_len & (min_mask)) { |
+    LOG(ERROR) << "Corrupt Index file"; |
+    return false; |
+  } |
+ |
+  //AdjustMaxCacheSize(header->table_len); |
+ |
+#if !defined(NET_BUILD_STRESS_CACHE) |
+  if (header->num_bytes < 0 || header->max_bytes < 0 || |
+      header->num_bytes > header->max_bytes + kDefaultCacheSize) { |
+    LOG(ERROR) << "Invalid cache size"; |
+    return false; |
+  } |
+#endif |
+ |
+  if (header->num_entries < 0) { |
+    LOG(ERROR) << "Invalid number of entries"; |
+    return false; |
+  } |
+ |
+  // Load the table into memory with a single read. |
+  //scoped_array<char> buf(new char[current_size]); |
+  //return index_->Read(buf.get(), current_size, 0); |
+ |
+  return true; |
+} |
+ |
+// Reads the stats record referenced by |index->stats| (if any) from its |
+// block file into a heap buffer handed back through |result->stats_data|. |
+// Returns true when there are no stats to load, false on read failure. |
+bool BackendImplV3::Worker::InitStats(IndexHeaderV3* index, |
+                                      InitResult* result) { |
+  Addr address(index->stats); |
+  if (!address.is_initialized()) |
+    return true; |
+ |
+  if (!address.is_block_file()) { |
+    NOTREACHED(); |
+    return false; |
+  } |
+ |
+  int size = address.num_blocks() * address.BlockSize(); |
+ |
+  // Load the required data. |
+  MappedFile* file = GetMappedFile(address); |
+  if (!file) |
+    return false; |
+ |
+  scoped_ptr<char[]> data(new char[size]); |
+  // Skip the block-file header to reach the first data block. |
+  size_t offset = address.start_block() * address.BlockSize() + |
+                  kBlockHeaderSize; |
+  if (!file->Read(data.get(), size, offset)) |
+    return false; |
+ |
+  result->stats_data = data.Pass(); |
+  return true; |
+} |
+ |
+// Final step of doubling the index: adopt the temporary extra table (kept |
+// alive in big_extra_temp_table_ while cells were moved) as the current one. |
+int BackendImplV3::Worker::GrowDone() { |
+  Trace("Worker::GrowDone"); |
+  if (!init_) |
+    return ERR_OPERATION_FAILED; |
+ |
+  DCHECK(doubling_index_); |
+  doubling_index_ = false; |
+ |
+  extra_table_ = big_extra_temp_table_; |
+  big_extra_temp_table_ = NULL; |
+ |
+  return ERR_NO_ERROR; |
+} |
+ |
+} // namespace disk_cache |
Property changes on: net\disk_cache\v3\backend_worker.cc |
___________________________________________________________________ |
Added: svn:eol-style |
+ LF |