Chromium Code Reviews

Unified Diff: net/disk_cache/v3/entry_operation.cc

Issue 15203004: Disk cache: Reference CL for the implementation of file format version 3. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: IndexTable review Created 7 years, 1 month ago
Index: net/disk_cache/v3/entry_operation.cc
===================================================================
--- net/disk_cache/v3/entry_operation.cc (revision 0)
+++ net/disk_cache/v3/entry_operation.cc (revision 0)
@@ -0,0 +1,565 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/v3/entry_operation.h"
+
+#include "base/hash.h"
+#include "base/message_loop.h"
+#include "base/metrics/histogram.h"
+#include "base/string_util.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/bitmap.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/histogram_macros.h"
+#include "net/disk_cache/net_log_parameters.h"
+#include "net/disk_cache/sparse_control.h"
+#include "net/disk_cache/v3/backend_impl_v3.h"
+#include "net/disk_cache/v3/disk_format_v3.h"
+#include "net/disk_cache/v3/storage_block-inl.h"
+
+using base::Time;
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace {
+
+
+const int kMaxBufferSize = 1024 * 1024; // 1 MB.
+
+} // namespace
+
+namespace disk_cache {
+
+
+// ------------------------------------------------------------------------
+
+
+// ------------------------------------------------------------------------
+
+//int EntryImplV3::InternalReadData(int index, int offset,
+// IOBuffer* buf, int buf_len,
+// const CompletionCallback& callback) {
+// //DCHECK(node_.Data()->dirty || read_only_);
+// DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
+// if (index < 0 || index >= kNumStreams)
+// return net::ERR_INVALID_ARGUMENT;
+//
+// int entry_size = entry_->data_size[index];
+// if (offset >= entry_size || offset < 0 || !buf_len)
+// return 0;
+//
+// if (buf_len < 0)
+// return net::ERR_INVALID_ARGUMENT;
+//
+// if (!backend_)
+// return net::ERR_UNEXPECTED;
+//
+// TimeTicks start = TimeTicks::Now();
+//
+// if (offset + buf_len > entry_size)
+// buf_len = entry_size - offset;
+//
+// UpdateRank(false);
+//
+// backend_->OnEvent(Stats::READ_DATA);
+// backend_->OnRead(buf_len);
+//
+// Addr address(entry_->data_addr[index]);
+// int eof = address.is_initialized() ? entry_size : 0;
+// if (user_buffers_[index].get() &&
+// user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
+// // Complete the operation locally.
+// buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
+// ReportIOTime(kRead, start);
+// return buf_len;
+// }
+//
+// NOTREACHED();
+// /*
+// address.set_value(entry_->data_addr[index]);
+// DCHECK(address.is_initialized());
+// if (!address.is_initialized()) {
+// DoomImpl();
+// return net::ERR_FAILED;
+// }
+//
+// File* file = GetBackingFile(address, index);
+// if (!file) {
+// DoomImpl();
+// return net::ERR_FAILED;
+// }
+//
+// size_t file_offset = offset;
+// if (address.is_block_file()) {
+// DCHECK_LE(offset + buf_len, kMaxBlockSize);
+// file_offset += address.start_block() * address.BlockSize() +
+// kBlockHeaderSize;
+// }
+//
+// SyncCallback* io_callback = NULL;
+// if (!callback.is_null()) {
+// io_callback = new SyncCallback(this, buf, callback,
+// net::NetLog::TYPE_ENTRY_READ_DATA);
+// }
+//
+// TimeTicks start_async = TimeTicks::Now();
+//
+// bool completed;
+// if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
+// if (io_callback)
+// io_callback->Discard();
+// DoomImpl();
+// return net::ERR_FAILED;
+// }
+//
+// if (io_callback && completed)
+// io_callback->Discard();
+//
+// if (io_callback)
+// ReportIOTime(kReadAsync1, start_async);
+//
+// ReportIOTime(kRead, start);
+// return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;*/
+// return 0;
+//}
+//
+//int EntryImplV3::InternalWriteData(int index, int offset,
+// IOBuffer* buf, int buf_len,
+// const CompletionCallback& callback,
+// bool truncate) {
+// //DCHECK(node_.Data()->dirty || read_only_);
+// DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
+// if (index < 0 || index >= kNumStreams)
+// return net::ERR_INVALID_ARGUMENT;
+//
+// if (offset < 0 || buf_len < 0)
+// return net::ERR_INVALID_ARGUMENT;
+//
+// if (!backend_)
+// return net::ERR_UNEXPECTED;
+//
+// int max_file_size = backend_->MaxFileSize();
+//
+// // offset or buf_len could be negative numbers.
+// if (offset > max_file_size || buf_len > max_file_size ||
+// offset + buf_len > max_file_size) {
+// int size = offset + buf_len;
+// if (size <= max_file_size)
+// size = kint32max;
+// backend_->TooMuchStorageRequested(size);
+// return net::ERR_FAILED;
+// }
+//
+// TimeTicks start = TimeTicks::Now();
+//
+// // Read the size at this point (it may change inside prepare).
+// int entry_size = entry_->data_size[index];
+// bool extending = entry_size < offset + buf_len;
+// truncate = truncate && entry_size > offset + buf_len;
+// Trace("To PrepareTarget 0x%x", address_.value());
+// if (!PrepareTarget(index, offset, buf_len, truncate))
+// return net::ERR_FAILED;
+//
+// Trace("From PrepareTarget 0x%x", address_.value());
+// if (extending || truncate)
+// UpdateSize(index, entry_size, offset + buf_len);
+//
+// UpdateRank(true);
+//
+// backend_->OnEvent(Stats::WRITE_DATA);
+// backend_->OnWrite(buf_len);
+//
+// if (user_buffers_[index].get()) {
+// // Complete the operation locally.
+// user_buffers_[index]->Write(offset, buf, buf_len);
+// ReportIOTime(kWrite, start);
+// return buf_len;
+// }
+//
+// NOTREACHED();
+// /*
+// Addr address(entry_.Data()->data_addr[index]);
+// if (offset + buf_len == 0) {
+// if (truncate) {
+// DCHECK(!address.is_initialized());
+// }
+// return 0;
+// }
+//
+// File* file = GetBackingFile(address, index);
+// if (!file)
+// return net::ERR_FAILED;
+//
+// size_t file_offset = offset;
+// if (address.is_block_file()) {
+// DCHECK_LE(offset + buf_len, kMaxBlockSize);
+// file_offset += address.start_block() * address.BlockSize() +
+// kBlockHeaderSize;
+// } else if (truncate || (extending && !buf_len)) {
+// if (!file->SetLength(offset + buf_len))
+// return net::ERR_FAILED;
+// }
+//
+// if (!buf_len)
+// return 0;
+//
+// SyncCallback* io_callback = NULL;
+// if (!callback.is_null()) {
+// io_callback = new SyncCallback(this, buf, callback,
+// net::NetLog::TYPE_ENTRY_WRITE_DATA);
+// }
+//
+// TimeTicks start_async = TimeTicks::Now();
+//
+// bool completed;
+// if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
+// &completed)) {
+// if (io_callback)
+// io_callback->Discard();
+// return net::ERR_FAILED;
+// }
+//
+// if (io_callback && completed)
+// io_callback->Discard();
+//
+// if (io_callback)
+// ReportIOTime(kWriteAsync1, start_async);
+//
+// ReportIOTime(kWrite, start);
+// return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;*/
+// return 0;
+//}
+//
+//// ------------------------------------------------------------------------
+//
+//
+//// Note that this method may end up modifying a block file so upon return the
+//// involved block will be free, and could be reused for something else. If there
+//// is a crash after that point (and maybe before returning to the caller), the
+//// entry will be left dirty... and at some point it will be discarded; it is
+//// important that the entry doesn't keep a reference to this address, or we'll
+//// end up deleting the contents of |address| once again.
+//void EntryImplV3::DeleteData(Addr address, int index) {
+// DCHECK(backend_);
+// if (!address.is_initialized())
+// return;
+// if (address.is_separate_file()) {
+// int failure = !DeleteCacheFile(backend_->GetFileName(address));
+// CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
+// if (failure) {
+// LOG(ERROR) << "Failed to delete " <<
+// backend_->GetFileName(address).value() << " from the cache.";
+// }
+// if (files_[index])
+// files_[index] = NULL; // Releases the object.
+// } else {
+// backend_->DeleteBlock(address, true);
+// }
+//}
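
The note above amounts to an ordering rule: drop the entry's reference to the address before releasing the storage, so that a dirty entry found after a crash can never point at a block that has since been reused. Below is a minimal sketch of that ordering; EntryRecord and release_storage are hypothetical names for illustration, not code from this CL.

    #include <cstdint>

    // Illustration only; EntryRecord and release_storage are hypothetical.
    struct EntryRecord {
      uint32_t data_addr[4];  // 0 means "no data stored for this stream".
    };

    void SafeDelete(EntryRecord* entry, int index,
                    void (*release_storage)(uint32_t addr)) {
      uint32_t addr = entry->data_addr[index];
      if (!addr)
        return;
      // Forget the address first (and persist the record where applicable) so
      // that a crash from this point on leaves no dangling reference.
      entry->data_addr[index] = 0;
      // Only then free the block; it may be reused immediately afterwards.
      release_storage(addr);
    }
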
+//
+//void EntryImplV3::UpdateRank(bool modified) {
+// //if (!backend_)
+// // return;
+//
+// //if (!doomed_) {
+// // // Everything is handled by the backend.
+// // backend_->UpdateRank(this, modified);
+// // return;
+// //}
+//
+// //Time current = Time::Now();
+// //node_.Data()->last_used = current.ToInternalValue();
+//
+// //if (modified)
+// // node_.Data()->last_modified = current.ToInternalValue();
+//}
+
+// We keep a memory buffer for everything that ends up stored on a block file
+// (because we don't yet know the final data size), and for some of the data
+// that ends up on external files. This function will initialize that memory
+// buffer and/or the files needed to store the data.
+//
+// In general, a buffer may overlap data already stored on disk, and in that
+// case, the contents of the buffer are the most accurate. It may also extend
+// the file, but we don't want to read from disk just to keep the buffer up to
+// date. This means that as soon as there is a chance to get confused about what
+// is the most recent version of some part of a file, we'll flush the buffer and
+// reuse it for the new data. Keep in mind that the normal use pattern is quite
+// simple (write sequentially from the beginning), so we optimize for handling
+// that case.
+bool EntryImplV3::PrepareTarget(int index, int offset, int buf_len,
+                                bool truncate) {
+  if (truncate)
+    return HandleTruncation(index, offset, buf_len);
+
+  if (!offset && !buf_len)
+    return true;
+
+  Addr address(entry_->data_addr[index]);
+  if (address.is_initialized()) {
+    if (address.is_block_file() && !MoveToLocalBuffer(index))
+      return false;
+
+    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
+      // We are about to create a buffer for the first 16KB, so make sure that
+      // we preserve existing data.
+      if (!CopyToLocalBuffer(index))
+        return false;
+    }
+  }
+
+  if (!user_buffers_[index].get())
+    user_buffers_[index].reset(new UserBuffer(backend_));
+
+  return PrepareBuffer(index, offset, buf_len);
+}
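
As a rough illustration of the buffering policy described in the comment above PrepareTarget(): sequential writes from the start of a stream stay in memory, and anything that could leave the buffer and the on-disk copy disagreeing forces the buffered data out first. The sketch below is not from this CL; SketchBuffer and its members are hypothetical stand-ins for UserBuffer.

    #include <algorithm>
    #include <vector>

    // Hypothetical stand-in for UserBuffer: holds the most recent copy of the
    // byte range [start, start + data.size()) of one data stream.
    struct SketchBuffer {
      int start = 0;
      std::vector<char> data;

      int End() const { return start + static_cast<int>(data.size()); }

      // A write can stay in memory only if it touches or overlaps the buffered
      // range; a gap would make it ambiguous which bytes are current.
      bool CanBuffer(int offset, int len) const {
        return data.empty() || (offset >= start && offset <= End());
      }

      // Assumes CanBuffer(offset, len) was checked by the caller.
      void Write(int offset, const char* buf, int len) {
        if (data.empty())
          start = offset;
        if (offset + len > End())
          data.resize(offset + len - start);
        std::copy(buf, buf + len, data.begin() + (offset - start));
      }
    };

A caller would consult CanBuffer() before writing and write the buffered range out when it returns false, which is roughly the role Flush() and PrepareBuffer() play for the real UserBuffer.
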
+
+// We get to this function with some data already stored. If there is a
+// truncation that results in data stored internally, we'll explicitly
+// handle the case here.
+bool EntryImplV3::HandleTruncation(int index, int offset, int buf_len) {
+  Addr address(entry_->data_addr[index]);
+
+  int current_size = entry_->data_size[index];
+  int new_size = offset + buf_len;
+
+  if (!new_size) {
+    // This is by far the most common scenario.
+    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
+    entry_->data_addr[index] = 0;
+    entry_->data_size[index] = 0;
+    unreported_size_[index] = 0;
+    //entry_->Store();
+    DeleteData(address, index);
+
+    user_buffers_[index].reset();
+    return true;
+  }
+
+  // We never postpone truncating a file, if there is one, but we may postpone
+  // telling the backend about the size reduction.
+  if (user_buffers_[index].get()) {
+    DCHECK_GE(current_size, user_buffers_[index]->Start());
+    if (!address.is_initialized()) {
+      // There is no overlap between the buffer and disk.
+      if (new_size > user_buffers_[index]->Start()) {
+        // Just truncate our buffer.
+        DCHECK_LT(new_size, user_buffers_[index]->End());
+        user_buffers_[index]->Truncate(new_size);
+        return true;
+      }
+
+      // Just discard our buffer.
+      user_buffers_[index]->Reset();
+      return PrepareBuffer(index, offset, buf_len);
+    }
+
+    // There is some overlap or we need to extend the file before the
+    // truncation.
+    if (offset > user_buffers_[index]->Start())
+      user_buffers_[index]->Truncate(new_size);
+    UpdateSize(index, current_size, new_size);
+    if (!Flush(index, 0))
+      return false;
+    user_buffers_[index].reset();
+  }
+
+  // We have data somewhere, and it is not in a buffer.
+  DCHECK(!user_buffers_[index].get());
+  DCHECK(address.is_initialized());
+
+  if (new_size > kMaxBlockSize)
+    return true;  // Let the operation go directly to disk.
+
+  return ImportSeparateFile(index, offset + buf_len);
+}
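
For readability, the branches above can be summarized as a small decision function. This is a simplification for illustration only; TruncationPlan and PlanTruncation are hypothetical names, and the flush and import paths are lumped together.

    // Illustration only: which of the paths a truncation roughly takes.
    enum class TruncationPlan {
      kDropEverything,   // new_size == 0: release the address and the buffer.
      kTruncateBuffer,   // Buffered data alone covers the bytes being cut off.
      kResetBuffer,      // Everything left sits before the buffer: start over.
      kGoThroughDisk,    // Flush/import first so the file reflects the size.
    };

    TruncationPlan PlanTruncation(bool has_disk_data, bool has_buffer,
                                  int buffer_start, int new_size) {
      if (new_size == 0)
        return TruncationPlan::kDropEverything;
      if (has_buffer && !has_disk_data) {
        return new_size > buffer_start ? TruncationPlan::kTruncateBuffer
                                       : TruncationPlan::kResetBuffer;
      }
      return TruncationPlan::kGoThroughDisk;
    }
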
+
+bool EntryImplV3::CopyToLocalBuffer(int index) {
+  Addr address(entry_->data_addr[index]);
+  DCHECK(!user_buffers_[index].get());
+  DCHECK(address.is_initialized());
+
+  int len = std::min(entry_->data_size[index], kMaxBlockSize);
+  user_buffers_[index].reset(new UserBuffer(backend_));
+  user_buffers_[index]->Write(len, NULL, 0);
+
+  // TODO: Connect the original callback and arguments of this operation to
+  // the operation that is already in progress.
+
+  File* file = GetBackingFile(address, index);
+  int offset = 0;
+
+  if (address.is_block_file())
+    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+  /*if (!file ||
+      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
+    user_buffers_[index].reset();
+    return false;
+  }*/
+  return true;
+}
+
+bool EntryImplV3::MoveToLocalBuffer(int index) {
+  if (!CopyToLocalBuffer(index))
+    return false;
+
+  Addr address(entry_->data_addr[index]);
+  entry_->data_addr[index] = 0;
+  //entry_->Store();
+  DeleteData(address, index);
+
+  // If we lose this entry we'll see it as zero sized.
+  int len = entry_->data_size[index];
+  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
+  unreported_size_[index] = len;
+  return true;
+}
+
+bool EntryImplV3::ImportSeparateFile(int index, int new_size) {
+  if (entry_->data_size[index] > new_size)
+    UpdateSize(index, entry_->data_size[index], new_size);
+
+  return MoveToLocalBuffer(index);
+}
+
+bool EntryImplV3::PrepareBuffer(int index, int offset, int buf_len) {
+  DCHECK(user_buffers_[index].get());
+  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
+      offset > entry_->data_size[index]) {
+    // We are about to extend the buffer or the file (with zeros), so make sure
+    // that we are not overwriting anything.
+    Addr address(entry_->data_addr[index]);
+    if (address.is_initialized() && address.is_separate_file()) {
+      if (!Flush(index, 0))
+        return false;
+      // There is an actual file already, and we don't want to keep track of
+      // its length so we let this operation go straight to disk.
+      // The only case when a buffer is allowed to extend the file (as in fill
+      // with zeros before the start) is when there is no file yet to extend.
+      user_buffers_[index].reset();
+      return true;
+    }
+  }
+
+  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
+    if (!Flush(index, offset + buf_len))
+      return false;
+
+    // Let's try again.
+    if (offset > user_buffers_[index]->End() ||
+        !user_buffers_[index]->PreWrite(offset, buf_len)) {
+      // We cannot complete the operation with a buffer.
+      DCHECK(!user_buffers_[index]->Size());
+      DCHECK(!user_buffers_[index]->Start());
+      user_buffers_[index].reset();
+    }
+  }
+  return true;
+}
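
The rule in the comments above (a buffer may zero-fill ahead of the current data only while no separate file exists) can be captured in one predicate. A hedged sketch with hypothetical names, not code from this CL:

    // Returns true if a write at |offset| may be absorbed by the memory buffer
    // under the extension rule described above. Illustration only.
    bool MayStayInBuffer(bool has_separate_file, int buffer_end,
                         int stream_size, int offset) {
      // The write would zero-fill a gap past the buffered data or the stream.
      bool extends = (buffer_end && offset > buffer_end) || offset > stream_size;
      // Extending in memory is fine only while there is no backing file whose
      // length would otherwise go stale.
      return !(extends && has_separate_file);
    }
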
+
+bool EntryImplV3::Flush(int index, int min_len) {
+  Addr address(entry_->data_addr[index]);
+  DCHECK(user_buffers_[index].get());
+  DCHECK(!address.is_initialized() || address.is_separate_file());
+  DVLOG(3) << "Flush";
+
+  int size = std::max(entry_->data_size[index], min_len);
+  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
+    return false;
+
+  if (!entry_->data_size[index]) {
+    DCHECK(!user_buffers_[index]->Size());
+    return true;
+  }
+
+  address.set_value(entry_->data_addr[index]);
+
+  int len = user_buffers_[index]->Size();
+  int offset = user_buffers_[index]->Start();
+  if (!len && !offset)
+    return true;
+
+  backend_->WriteData(this, address, offset, user_buffers_[index]->Get(),
+                      len, CompletionCallback());
+  user_buffers_[index]->Reset();
+  return true;
+}
+
+void EntryImplV3::UpdateSize(int index, int old_size, int new_size) {
+  if (entry_->data_size[index] == new_size)
+    return;
+
+  unreported_size_[index] += new_size - old_size;
+  entry_->data_size[index] = new_size;
+  modified_ = true;
+}
+
+int EntryImplV3::InitSparseData() {
+  //if (sparse_.get())
+  //  return net::OK;
+
+  //// Use a local variable so that sparse_ never goes from 'valid' to NULL.
+  //scoped_ptr<SparseControl> sparse(new SparseControl(this));
+  //int result = sparse->Init();
+  //if (net::OK == result)
+  //  sparse_.swap(sparse);
+
+  //return result;
+  return 0;
+}
+
+void EntryImplV3::SetEntryFlags(uint32 flags) {
+  entry_->flags |= flags;
+  modified_ = true;
+}
+
+uint32 EntryImplV3::GetEntryFlags() {
+  return entry_->flags;
+}
+
+void EntryImplV3::GetData(int index, char** buffer, Addr* address) {
+  //DCHECK(backend_);
+  //if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
+  //    !user_buffers_[index]->Start()) {
+  //  // The data is already in memory, just copy it and we're done.
+  //  int data_len = entry_->data_size[index];
+  //  if (data_len <= user_buffers_[index]->Size()) {
+  //    DCHECK(!user_buffers_[index]->Start());
+  //    *buffer = new char[data_len];
+  //    memcpy(*buffer, user_buffers_[index]->Data(), data_len);
+  //    return;
+  //  }
+  //}
+
+  //// Bad news: we'd have to read the info from disk so instead we'll just tell
+  //// the caller where to read from.
+  //*buffer = NULL;
+  //address->set_value(entry_->data_addr[index]);
+  //if (address->is_initialized()) {
+  //  // Prevent us from deleting the block from the backing store.
+  //  backend_->ModifyStorageSize(entry_->data_size[index] -
+  //                              unreported_size_[index], 0);
+  //  entry_->data_addr[index] = 0;
+  //  entry_->data_size[index] = 0;
+  //}
+}
+
+void EntryImplV3::Log(const char* msg) {
+  /*int dirty = 0;
+  if (node_.HasData()) {
+    dirty = node_.dirty;
+  }
+
+  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
+        entry_->address().value(), node_.address().value());
+
+  Trace(" data: 0x%x 0x%x 0x%x", entry_->data_addr[0],
+        entry_->data_addr[1], entry_->long_key);
+
+  Trace(" doomed: %d 0x%x", doomed_, dirty);*/
+}
+
+}  // namespace disk_cache
Property changes on: net\disk_cache\v3\entry_operation.cc
___________________________________________________________________
Added: svn:eol-style
+ LF