Chromium Code Reviews

Unified Diff: net/disk_cache/v3/entry_impl_v3.cc

Issue 15203004: Disk cache: Reference CL for the implementation of file format version 3. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: IndexTable review Created 7 years, 1 month ago
Index: net/disk_cache/v3/entry_impl_v3.cc
===================================================================
--- net/disk_cache/v3/entry_impl_v3.cc (revision 0)
+++ net/disk_cache/v3/entry_impl_v3.cc (revision 0)
@@ -0,0 +1,1429 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/v3/entry_impl_v3.h"
+
+#include "base/hash.h"
+#include "base/message_loop.h"
+#include "base/metrics/histogram.h"
+#include "base/string_util.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/bitmap.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/net_log_parameters.h"
+#include "net/disk_cache/storage_block-inl.h"
+#include "net/disk_cache/v3/backend_impl_v3.h"
+#include "net/disk_cache/v3/disk_format_v3.h"
+#include "net/disk_cache/v3/histogram_macros.h"
+#include "net/disk_cache/v3/sparse_control_v3.h"
+
+using base::Time;
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace {
+
+const int kMinBufferSize = disk_cache::kMaxBlockSize;
+const int kMaxBufferSize = 1024 * 1024; // 1 MB.
+const int kKeyIndex = 0;
+
+} // namespace
+
+namespace disk_cache {
+
+typedef StorageBlock<EntryRecord> CacheEntryBlockV3;
+typedef StorageBlock<ShortEntryRecord> CacheShortEntryBlock;
+
+// This class handles individual memory buffers that store data before it is
+// sent to disk. The buffer can start at any offset, but if we try to write to
+// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
+// zero. The buffer grows up to a size determined by the backend, to keep the
+// total memory used under control.
+class EntryImplV3::UserBuffer {
+ public:
+ explicit UserBuffer(BackendImplV3* backend)
+ : backend_(backend->GetWeakPtr()),
+ offset_(0),
+ grow_allowed_(true),
+ force_size_(false) {
+ buffer_ = new net::GrowableIOBuffer();
+ buffer_->SetCapacity(kMinBufferSize);
+ }
+ ~UserBuffer() {
+ if (backend_)
+ backend_->BufferDeleted(capacity() - kMinBufferSize);
+ }
+
+ // Returns true if we can handle writing |len| bytes to |offset|.
+ bool PreWrite(int offset, int len);
+
+ // Truncates the buffer to |offset| bytes.
+ void Truncate(int offset);
+
+ // Writes |len| bytes from |buf| at the given |offset|.
+ void Write(int offset, IOBuffer* buf, int len);
+
+ // Returns true if we can read |len| bytes from |offset|, given that the
+ // actual file has |eof| bytes stored. Note that the number of bytes to read
+ // may be modified by this method even though it returns false: that means we
+ // should do a smaller read from disk.
+ bool PreRead(int eof, int offset, int* len);
+
+  // Reads |len| bytes into |buf| from the given |offset|.
+ int Read(int offset, IOBuffer* buf, int len);
+
+ void Rebase();
+
+ void ForceSize(bool value);
+
+ net::IOBuffer* Get();
+ int Size() { return static_cast<int>(buffer_->offset()); }
+ int Start() { return offset_; }
+ int End() { return offset_ + Size(); }
+
+ private:
+ int capacity() { return buffer_->capacity(); }
+ bool GrowBuffer(int required, int limit);
+
+ base::WeakPtr<BackendImplV3> backend_;
+ int offset_;
+ scoped_refptr<net::GrowableIOBuffer> buffer_;
+ bool grow_allowed_;
+ bool force_size_;
+ DISALLOW_COPY_AND_ASSIGN(UserBuffer);
+};
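+
+// A minimal usage sketch (hypothetical flow; the real callers are the
+// ReadDataImpl / WriteDataImpl paths below):
+//
+//   UserBuffer buffer(backend);                // Capacity starts at 16 KB.
+//   if (buffer.PreWrite(0, len))
+//     buffer.Write(0, io_buf, len);            // Data is kept in memory.
+//   if (buffer.PreRead(eof, offset, &len))
+//     len = buffer.Read(offset, io_buf, len);  // Served without disk IO.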
+
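+// Examples of PreWrite() decisions (hypothetical values, assuming the default
+// 16 KB starting capacity and the constants above):
+//   PreWrite(0, 8192)     -> true; fits in the current capacity.
+//   PreWrite(0, 2 << 20)  -> false on an empty buffer; 2 MB exceeds
+//                            kMaxBufferSize, so the write must go to disk.
+// Once the buffer holds data the limit is kMaxBufferSize * 6 / 5, which seems
+// intended to absorb writes slightly past 1 MB without forcing a flush.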
+bool EntryImplV3::UserBuffer::PreWrite(int offset, int len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GE(len, 0);
+ DCHECK_GE(offset + len, 0);
+
+ // We don't want to write before our current start.
+ if (offset < offset_)
+ return false;
+
+  // Let's get the common case out of the way.
+ if (offset + len <= capacity())
+ return true;
+
+ if (!Size())
+ return GrowBuffer(len, kMaxBufferSize);
+
+ int required = offset - offset_ + len;
+ return GrowBuffer(required, kMaxBufferSize * 6 / 5);
+}
+
+void EntryImplV3::UserBuffer::Truncate(int offset) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GE(offset, offset_);
+ DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;
+
+ offset -= offset_;
+ if (Size() >= offset)
+ buffer_->set_offset(offset);
+}
+
+void EntryImplV3::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GE(len, 0);
+ DCHECK_GE(offset + len, 0);
+ DCHECK_GE(offset, offset_);
+ DVLOG(3) << "Buffer write at " << offset << " current " << offset_;
+
+ if (!Size())
+ offset_ = offset;
+
+ offset -= offset_;
+
+ if (offset > Size()) {
+ memset(buffer_->data(), 0, offset - Size());
+ buffer_->set_offset(offset);
+ }
+
+ if (!len)
+ return;
+
+ char* buffer = buf->data();
+ int valid_len = Size() - offset;
+ int copy_len = std::min(valid_len, len);
+ if (copy_len) {
+ memcpy(buffer_->StartOfBuffer() + offset, buffer, copy_len);
+ len -= copy_len;
+ buffer += copy_len;
+ }
+ if (!len)
+ return;
+
+ memcpy(buffer_->data(), buffer, len);
+ buffer_->set_offset(buffer_->offset() + len);
+}
+
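+// Examples of PreRead() decisions, assuming the buffer holds bytes [100, 200)
+// of the stream and the file on disk stores 150 bytes:
+//   PreRead(150, 120, &len) -> true; the read starts inside the buffer.
+//   PreRead(150, 50, &len)  -> false, with |len| clamped to 50 so the caller
+//                              only reads the part that lives on disk.
+//   PreRead(0, 50, &len)    -> true when no file exists yet; Read() will
+//                              zero-fill the gap before the buffer.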
+bool EntryImplV3::UserBuffer::PreRead(int eof, int offset, int* len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GT(*len, 0);
+
+ if (offset < offset_) {
+ // We are reading before this buffer.
+ if (offset >= eof)
+ return true;
+
+ // If the read overlaps with the buffer, change its length so that there is
+ // no overlap.
+ *len = std::min(*len, offset_ - offset);
+ *len = std::min(*len, eof - offset);
+
+ // We should read from disk.
+ return false;
+ }
+
+ if (!Size())
+ return false;
+
+ // See if we can fulfill the first part of the operation.
+ return (offset - offset_ < Size());
+}
+
+int EntryImplV3::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GT(len, 0);
+ DCHECK(Size() || offset < offset_);
+
+ int clean_bytes = 0;
+ if (offset < offset_) {
+    // We don't have a file, so let's fill the first part with zeros.
+ clean_bytes = std::min(offset_ - offset, len);
+ memset(buf->data(), 0, clean_bytes);
+ if (len == clean_bytes)
+ return len;
+ offset = offset_;
+ len -= clean_bytes;
+ }
+
+ int start = offset - offset_;
+ int available = Size() - start;
+ DCHECK_GE(start, 0);
+ DCHECK_GE(available, 0);
+ len = std::min(len, available);
+ memcpy(buf->data() + clean_bytes, buffer_->StartOfBuffer() + start, len);
+ return len + clean_bytes;
+}
+
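+// Turns an empty buffer positioned at |offset_| into an explicit run of zeros
+// covering [0, offset_), letting Flush() write out a zero-filled prefix that
+// was previously only implied.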
+void EntryImplV3::UserBuffer::Rebase() {
+ DCHECK(!Size());
+ DCHECK(offset_ < capacity());
+ memset(buffer_->data(), 0, offset_);
+ buffer_->set_offset(offset_);
+ offset_ = 0;
+}
+
+void EntryImplV3::UserBuffer::ForceSize(bool value) {
+ force_size_ = value;
+}
+
+net::IOBuffer* EntryImplV3::UserBuffer::Get() {
+ buffer_->set_offset(0);
+ return buffer_.get();
+}
+
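+// A worked example of the growth policy below (hypothetical numbers, using
+// kMinBufferSize == 16 KB): with capacity() == 16 KB and required == 20 KB,
+// to_add becomes max(4 KB, 64 KB) == 64 KB, then max(16 KB, 64 KB) == 64 KB,
+// so the buffer grows to min(80 KB, limit). The buffer therefore at least
+// doubles whenever a grow succeeds (unless clamped by |limit|), keeping
+// reallocations infrequent.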
+bool EntryImplV3::UserBuffer::GrowBuffer(int required, int limit) {
+ DCHECK_GE(required, 0);
+ int current_size = capacity();
+ if (required <= current_size)
+ return true;
+
+ if (required > limit)
+ return false;
+
+ if (!backend_)
+ return false;
+
+ int to_add = std::max(required - current_size, kMinBufferSize * 4);
+ to_add = std::max(current_size, to_add);
+ required = std::min(current_size + to_add, limit);
+
+ grow_allowed_ = backend_->IsAllocAllowed(current_size, required, force_size_);
+ force_size_ = false;
+ if (!grow_allowed_)
+ return false;
+
+ DVLOG(3) << "Buffer grow to " << required;
+
+ buffer_->SetCapacity(required);
+ return true;
+}
+
+// ------------------------------------------------------------------------
+
+EntryImplV3::EntryImplV3(BackendImplV3* backend, Addr address, bool read_only)
+ : backend_(backend->GetWeakPtr()),
+ address_(address),
+ num_handles_(0),
+ doomed_(false),
+ read_only_(read_only),
+ dirty_(true),
+ modified_(false),
+ callback_(base::Bind(&EntryImplV3::OnIOComplete,
+ base::Unretained(this))) {
+ for (int i = 0; i < kNumStreams; i++) {
+ unreported_size_[i] = 0;
+ }
+}
+
+EntryImplV3::EntryImplV3(BackendImplV3* backend,
+ Addr address,
+ const std::string& key,
+ scoped_ptr<EntryRecord> record)
+ : entry_(record.Pass()),
+ backend_(backend->GetWeakPtr()),
+ key_(key),
+ address_(address),
+ num_handles_(0),
+ doomed_(false),
+ read_only_(false),
+ dirty_(false),
+ modified_(false),
+ callback_(base::Bind(&EntryImplV3::OnIOComplete,
+ base::Unretained(this))) {
+ for (int i = 0; i < kNumStreams; i++) {
+ unreported_size_[i] = 0;
+ }
+}
+
+EntryImplV3::EntryImplV3(BackendImplV3* backend,
+ Addr address,
+ const std::string& key,
+ scoped_ptr<ShortEntryRecord> record)
+ : short_entry_(record.Pass()),
+ backend_(backend->GetWeakPtr()),
+ key_(key),
+ address_(address),
+ num_handles_(0),
+ doomed_(false),
+ read_only_(false),
+ dirty_(false),
+ modified_(false),
+ callback_(base::Bind(&EntryImplV3::OnIOComplete,
+ base::Unretained(this))) {
+ for (int i = 0; i < kNumStreams; i++) {
+ unreported_size_[i] = 0;
+ }
+}
+
+void EntryImplV3::CreateEntry(const std::string& key, uint32 hash,
+ ShortEntryRecord* old_info) {
+ Trace("Create entry In");
+
+ key_ = key;
+ entry_.reset(new EntryRecord);
+ memset(entry_.get(), 0, sizeof(*entry_.get()));
+ entry_->key_len = static_cast<int>(key.size());
+ entry_->hash = hash;
+ entry_->creation_time = backend_->GetTime().ToInternalValue();
+ entry_->last_access_time = entry_->creation_time;
+ entry_->last_modified_time = entry_->creation_time;
+ dirty_ = true;
+ backend_->UpdateRank(this, true);
+
+ if (old_info) {
+ entry_->reuse_count = old_info->reuse_count;
+ entry_->refetch_count = old_info->refetch_count;
+ }
+
+ backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+ CACHE_UMA(COUNTS, "KeySize", static_cast<int32>(key.size()));
+
+ WriteKey();
+ num_handles_++;
+ Log("Create Entry ");
+}
+
+void EntryImplV3::OnOpenEntry() {
+ num_handles_++;
+}
+
+scoped_ptr<ShortEntryRecord> EntryImplV3::GetShortEntryRecord() {
+ return short_entry_.Pass();
+}
+
+uint32 EntryImplV3::GetHash() const {
+ return entry_->hash;
+}
+
+Addr EntryImplV3::GetAddress() const {
+ return address_;
+}
+
+int EntryImplV3::GetReuseCounter() const {
+ return entry_->reuse_count;
+}
+
+void EntryImplV3::SetReuseCounter(int counter) {
+ DCHECK_LT(counter, 256);
+ DCHECK_GE(counter, 0);
+ entry_->reuse_count = static_cast<uint8>(counter);
+ dirty_ = true;
+}
+
+int EntryImplV3::GetRefetchCounter() const {
+ return entry_->refetch_count;
+}
+
+void EntryImplV3::SetRefetchCounter(int counter) {
+ DCHECK_LT(counter, 256);
+ DCHECK_GE(counter, 0);
+ entry_->refetch_count = static_cast<uint8>(counter);
+ dirty_ = true;
+}
+
+bool EntryImplV3::IsSameEntry(const std::string& key, uint32 hash) {
+ if (entry_->hash != hash ||
+ static_cast<size_t>(entry_->key_len) != key.size())
+ return false;
+
+ return (key.compare(GetKey()) == 0);
+}
+
+void EntryImplV3::InternalDoom() {
+ DCHECK(!doomed_);
+ net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
+ doomed_ = true;
+ dirty_ = true;
+}
+
+bool EntryImplV3::SanityCheck() {
+ DCHECK(BasicSanityCheck(*entry_.get()));
+
+ if (entry_->reuse_count < 0 || entry_->refetch_count < 0)
+ return false;
+
+ if (entry_->state > ENTRY_USED || entry_->state < ENTRY_NEW)
+ return false;
+
+ return true;
+}
+
+bool EntryImplV3::DataSanityCheck() {
+ if (entry_->hash != base::Hash(GetKey()))
+ return false;
+
+ for (int i = 0; i < kNumStreams; i++) {
+ Addr data_addr(entry_->data_addr[i]);
+ int data_size = entry_->data_size[i];
+ if (data_size < 0)
+ return false;
+ if (!data_size && data_addr.is_initialized())
+ return false;
+ if (!data_addr.SanityCheckV3())
+ return false;
+ if (!data_size)
+ continue;
+ if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
+ return false;
+ if (data_size > kMaxBlockSize && data_addr.is_block_file())
+ return false;
+ }
+ return true;
+}
+
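+// The checks below mirror the allocation policy: payloads of at most
+// kMaxBlockSize (16 KB) live in block files and larger payloads in separate
+// files, so a record that claims, say, 4 KB of data at a separate-file
+// address is rejected as corrupt.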
+// Static.
+bool EntryImplV3::BasicSanityCheck(const EntryRecord& record) {
+ CacheEntryBlockV3 entry_block;
+ entry_block.SetData(const_cast<EntryRecord*>(&record));
+ if (!entry_block.VerifyHash())
+ return false;
+
+ if (record.key_len <= 0 || record.data_size[0] < record.key_len)
+ return false;
+
+ Addr data_addr(record.data_addr[0]);
+ if (!data_addr.is_initialized() || !data_addr.SanityCheckV3())
+ return false;
+
+ if (record.data_size[0] <= kMaxBlockSize && data_addr.is_separate_file())
+ return false;
+
+ if (record.data_size[0] > kMaxBlockSize && data_addr.is_block_file())
+ return false;
+
+ return true;
+}
+
+// Static.
+bool EntryImplV3::DeletedSanityCheck(const ShortEntryRecord& record) {
+ CacheShortEntryBlock entry_block;
+ entry_block.SetData(const_cast<ShortEntryRecord*>(&record));
+ if (!entry_block.VerifyHash())
+ return false;
+
+ if (record.key_len <= 0)
+ return false;
+
+ return true;
+}
+
+void EntryImplV3::FixForDelete() {
+  // TODO: port this logic from the v2 EntryImpl to the v3 record format; the
+  // old implementation is kept below for reference.
+  //EntryStore* stored = entry_.Data();
+ //Addr key_addr(stored->long_key);
+
+ //if (!key_addr.is_initialized())
+ // stored->key[stored->key_len] = '\0';
+
+ //for (int i = 0; i < kNumStreams; i++) {
+ // Addr data_addr(stored->data_addr[i]);
+ // int data_size = stored->data_size[i];
+ // if (data_addr.is_initialized()) {
+ // if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
+ // (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
+ // !data_addr.SanityCheck()) {
+ // STRESS_NOTREACHED();
+ // // The address is weird so don't attempt to delete it.
+ // stored->data_addr[i] = 0;
+ // // In general, trust the stored size as it should be in sync with the
+ // // total size tracked by the backend.
+ // }
+ // }
+ // if (data_size < 0)
+ // stored->data_size[i] = 0;
+ //}
+ //entry_.Store();
+}
+
+void EntryImplV3::SetTimes(base::Time last_used, base::Time last_modified) {
+ entry_->last_access_time = last_used.ToInternalValue();
+ entry_->last_modified_time = last_modified.ToInternalValue();
+ dirty_ = true;
+}
+
+void EntryImplV3::BeginLogging(net::NetLog* net_log, bool created) {
+ DCHECK(!net_log_.net_log());
+ net_log_ = net::BoundNetLog::Make(
+ net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
+ CreateNetLogEntryCreationCallback(this, created));
+}
+
+const net::BoundNetLog& EntryImplV3::net_log() const {
+ return net_log_;
+}
+
+void EntryImplV3::NotifyDestructionForTest(const CompletionCallback& callback) {
+ DCHECK(destruction_callback_.is_null());
+ destruction_callback_ = callback;
+}
+
+// ------------------------------------------------------------------------
+
+void EntryImplV3::Doom() {
+ if (doomed_ || !backend_)
+ return;
+
+ backend_->InternalDoomEntry(this);
+}
+
+void EntryImplV3::Close() {
+ num_handles_--;
+ if (!num_handles_) {
+ if (sparse_.get())
+ sparse_->Close();
+
+ if (!pending_operations_.empty()) {
+ PendingOperation op =
+ { PENDING_CLEANUP, 0, 0, NULL, 0, CompletionCallback(), false };
+ pending_operations_.push(op);
+ } else {
+ Cleanup();
+ }
+ }
+ Release();
+}
+
+std::string EntryImplV3::GetKey() const {
+ return key_;
+}
+
+Time EntryImplV3::GetLastUsed() const {
+ return Time::FromInternalValue(entry_->last_access_time);
+}
+
+Time EntryImplV3::GetLastModified() const {
+ return Time::FromInternalValue(entry_->last_modified_time);
+}
+
+int32 EntryImplV3::GetDataSize(int index) const {
+ if (index < 0 || index >= kNumStreams)
+ return 0;
+
+ return GetAdjustedSize(index, entry_->data_size[index]);
+}
+
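+// Stream kKeyIndex stores the key at the start of its data, so the size
+// reported to callers excludes it; e.g. with a 10-byte key, a stored size of
+// 110 bytes is reported as 100.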
+int32 EntryImplV3::GetAdjustedSize(int index, int real_size) const {
+ DCHECK_GE(index, 0);
+  DCHECK_LT(index, kNumStreams);
+
+ if (index == kKeyIndex)
+ return real_size - key_.size();
+
+ return real_size;
+}
+
+int EntryImplV3::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!pending_operations_.empty()) {
+ PendingOperation op =
+ { PENDING_READ, index, offset, buf, buf_len, callback, false };
+ pending_operations_.push(op);
+ return net::ERR_IO_PENDING;
+ }
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
+ }
+
+ int result = ReadDataImpl(index, offset, buf, buf_len, NULL, callback);
+
+ if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
+int EntryImplV3::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback, bool truncate) {
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!pending_operations_.empty()) {
+ PendingOperation op =
+ { PENDING_WRITE, index, offset, buf, buf_len, callback, truncate };
+ pending_operations_.push(op);
+ return net::ERR_IO_PENDING;
+ }
+
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
+ }
+
+ int result = WriteDataImpl(index, offset, buf, buf_len, NULL, callback,
+ truncate);
+
+ if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
+int EntryImplV3::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (!sparse_.get())
+ sparse_.reset(new SparseControlV3(this));
+
+ TimeTicks start = TimeTicks::Now();
+ int result = sparse_->StartIO(SparseControlV3::kReadOperation, offset, buf,
+ buf_len, callback);
+ ReportIOTime(kSparseRead, start);
+ return result;
+}
+
+int EntryImplV3::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (!sparse_.get())
+ sparse_.reset(new SparseControlV3(this));
+
+ TimeTicks start = TimeTicks::Now();
+ int result = sparse_->StartIO(SparseControlV3::kWriteOperation, offset, buf,
+ buf_len, callback);
+ ReportIOTime(kSparseWrite, start);
+ return result;
+}
+
+int EntryImplV3::GetAvailableRange(int64 offset, int len, int64* start,
+ const CompletionCallback& callback) {
+ if (!sparse_.get())
+ sparse_.reset(new SparseControlV3(this));
+
+ return sparse_->GetAvailableRange(offset, len, start, callback);
+}
+
+bool EntryImplV3::CouldBeSparse() const {
+ if (sparse_.get())
+ return sparse_->CouldBeSparse();
+
+ scoped_ptr<SparseControlV3> sparse;
+ sparse.reset(new SparseControlV3(const_cast<EntryImplV3*>(this)));
+ return sparse->CouldBeSparse();
+}
+
+void EntryImplV3::CancelSparseIO() {
+ if (!sparse_.get())
+ return;
+
+ sparse_->CancelIO();
+}
+
+int EntryImplV3::ReadyForSparseIO(const CompletionCallback& callback) {
+ if (!sparse_.get())
+ return net::OK;
+
+ return sparse_->ReadyToUse(callback);
+}
+
+// ------------------------------------------------------------------------
+
+// When an entry is deleted from the cache, we clean up all the data associated
+// with it for two reasons: to simplify the reuse of the block (we know that any
+// unused block is filled with zeros), and to simplify handling of partial
+// reads and writes (we don't have to worry about returning data that belongs
+// to a previous cache entry just because a range was never fully written).
+EntryImplV3::~EntryImplV3() {
+ if (!backend_)
+ return;
+ Log("~EntryImplV3 in");
+
+ DCHECK(!dirty_);
+
+ // Remove this entry from the list of open entries.
+ backend_->OnEntryDestroyBegin(address_);
+
+ Trace("~EntryImplV3 out 0x%p", reinterpret_cast<void*>(this));
+ net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
+ backend_->OnEntryDestroyEnd();
+
+ if (!destruction_callback_.is_null())
+ destruction_callback_.Run(net::OK);
+}
+
+void EntryImplV3::Cleanup() {
+ if (!backend_ || !dirty_)
+ return;
+
+ Log("Cleanup in");
+
+ bool success = true;
+ if (doomed_) {
+ success = DeleteEntryData();
+ } else {
+#if defined(NET_BUILD_STRESS_CACHE)
+ SanityCheck();
+#endif
+ net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
+ for (int index = 0; index < kNumStreams; index++) {
+ if (user_buffers_[index].get()) {
+ int rv = Flush(index, 0);
+ if (rv != net::OK) {
+ DCHECK_EQ(rv, net::ERR_IO_PENDING);
+ PendingOperation op =
+ { PENDING_DONE, 0, 0, NULL, 0, CompletionCallback(), false };
+ pending_operations_.push(op);
+ }
+ }
+ Addr address(entry_->data_addr[index]);
+ if (address.is_separate_file())
+ backend_->Close(this, address);
+ if (unreported_size_[index]) {
+ backend_->ModifyStorageSize(
+ entry_->data_size[index] - unreported_size_[index],
+ entry_->data_size[index]);
+ }
+ }
+
+ if (dirty_) {
+ entry_->state = ENTRY_USED;
+ WriteEntryData();
+ }
+
+ backend_->OnEntryCleanup(this);
+ }
+
+ if (success)
+ dirty_ = false;
+ Trace("~Cleanup out 0x%p", reinterpret_cast<void*>(this));
+}
+
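+// The key is buffered at offset 0 of stream kKeyIndex. ForceSize(true)
+// apparently asks the backend to allow the allocation even under memory
+// pressure, and the extra 1024 bytes of PreWrite() slack seem intended to
+// leave room for small stream-0 writes that may follow.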
+void EntryImplV3::WriteKey() {
+ DCHECK(!user_buffers_[kKeyIndex]);
+ DCHECK(!entry_->data_addr[kKeyIndex]);
+ DCHECK(!entry_->data_size[kKeyIndex]);
+
+ user_buffers_[kKeyIndex].reset(new UserBuffer(backend_));
+ scoped_refptr<net::IOBuffer> buffer(new net::WrappedIOBuffer(key_.data()));
+
+ user_buffers_[kKeyIndex]->ForceSize(true);
+ bool rv = user_buffers_[kKeyIndex]->PreWrite(0, key_.size() + 1024);
+ DCHECK(rv);
+ user_buffers_[kKeyIndex]->ForceSize(false);
+ user_buffers_[kKeyIndex]->Write(0, buffer, key_.size());
+ modified_ = true;
+ dirty_ = true;
+ UpdateSize(kKeyIndex, 0, key_.size());
+}
+
+int EntryImplV3::ReadDataImpl(int index, int offset,
+ IOBuffer* buf, int buf_len,
+ PendingOperation* operation,
+ const CompletionCallback& callback) {
+ DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (index == kKeyIndex)
+ offset += key_.size();
+
+ int entry_size = entry_->data_size[index];
+ if (offset >= entry_size || offset < 0 || !buf_len)
+ return 0;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!backend_)
+ return net::ERR_UNEXPECTED;
+
+ TimeTicks start = TimeTicks::Now();
+
+ if (offset + buf_len > entry_size)
+ buf_len = entry_size - offset;
+
+ if (!buf_len)
+ return 0;
+
+ UpdateRank(false);
+
+ backend_->OnEvent(Stats::READ_DATA);
+ backend_->OnRead(buf_len);
+
+ Addr address(entry_->data_addr[index]);
+ int eof = address.is_initialized() ? entry_size : 0;
+ if (user_buffers_[index].get() &&
+ user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
+ // Complete the operation locally.
+ buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
+ ReportIOTime(kRead, start);
+ return buf_len;
+ }
+
+  // TODO: the address was already read above; is this reload needed?
+  address.set_value(entry_->data_addr[index]);
+  DCHECK(address.is_initialized());
+  if (!address.is_initialized()) {
+    Doom();  // TODO: confirm that dooming is the right recovery here.
+ return net::ERR_FAILED;
+ }
+
+ if (operation)
+ operation->action = PENDING_DONE;
+ backend_->ReadData(this, address, offset, buf, buf_len, callback);
+ return net::ERR_IO_PENDING;
+}
+
+int EntryImplV3::WriteDataImpl(int index, int offset,
+ IOBuffer* buf, int buf_len,
+ PendingOperation* operation,
+ const CompletionCallback& callback,
+ bool truncate) {
+ DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (!backend_)
+ return net::ERR_UNEXPECTED;
+
+ int max_file_size = backend_->MaxFileSize();
+
+  // offset + buf_len could overflow to a negative number, so check all three.
+ if (offset > max_file_size || buf_len > max_file_size ||
+ offset + buf_len > max_file_size) {
+ int size = offset + buf_len;
+ if (size <= max_file_size)
+ size = kint32max;
+ backend_->TooMuchStorageRequested(size);
+ return net::ERR_FAILED;
+ }
+
+ int actual_offset = (index == kKeyIndex) ? offset + key_.size() : offset;
+
+ TimeTicks start = TimeTicks::Now();
+
+ // Read the size at this point (it may change inside prepare).
+ int entry_size = entry_->data_size[index];
+ bool extending = entry_size < actual_offset + buf_len;
+ truncate = truncate && entry_size > actual_offset + buf_len;
+ Trace("To PrepareTarget 0x%x", address_.value());
+
+ int rv = PrepareTarget(index, actual_offset, buf_len, truncate);
+ if (rv == net::ERR_IO_PENDING) {
+ if (operation) {
+ DCHECK_EQ(operation->action, PENDING_WRITE);
+ operation->action = PENDING_FLUSH;
+ } else {
+ PendingOperation op =
+ { PENDING_FLUSH, index, offset, buf, buf_len, callback, truncate };
+ pending_operations_.push(op);
+ }
+ return rv;
+ }
+
+ if (rv != net::OK)
+ return rv;
+
+ Trace("From PrepareTarget 0x%x", address_.value());
+ if (extending || truncate)
+ UpdateSize(index, entry_size, actual_offset + buf_len);
+
+ UpdateRank(true);
+ OnEntryModified();
+
+ backend_->OnEvent(Stats::WRITE_DATA);
+ backend_->OnWrite(buf_len);
+
+ if (user_buffers_[index].get()) {
+ // Complete the operation locally.
+ user_buffers_[index]->Write(actual_offset, buf, buf_len);
+ ReportIOTime(kWrite, start);
+ return buf_len;
+ }
+
+ Addr address(entry_->data_addr[index]);
+ if (actual_offset + buf_len == 0) {
+ if (truncate) {
+ DCHECK(!address.is_initialized());
+ }
+ return 0;
+ }
+
+ if (address.is_separate_file() && (truncate || (extending && !buf_len)))
+ backend_->Truncate(this, address, actual_offset + buf_len);
+
+ if (!buf_len)
+ return 0;
+
+ if (operation)
+ operation->action = PENDING_DONE;
+ backend_->WriteData(this, address, actual_offset, buf, buf_len, callback);
+ return net::ERR_IO_PENDING;
+}
+
+// ------------------------------------------------------------------------
+
+bool EntryImplV3::CreateDataBlock(int index, int size) {
+ DCHECK(index >= 0 && index < kNumStreams);
+
+ Addr address(entry_->data_addr[index]);
+ if (!CreateBlock(size, &address))
+ return false;
+
+ entry_->data_addr[index] = address.value();
+ return true;
+}
+
+bool EntryImplV3::CreateBlock(int size, Addr* address) {
+ DCHECK(!address->is_initialized());
+ if (!backend_)
+ return false;
+
+ FileType file_type = Addr::RequiredFileType(size);
+ if (EXTERNAL != file_type) {
+ int num_blocks = (size + Addr::BlockSizeForFileType(file_type) - 1) /
+ Addr::BlockSizeForFileType(file_type);
+
+ return backend_->CreateBlock(file_type, num_blocks, address);
+ }
+
+ if (size > backend_->MaxFileSize())
+ return false;
+
+ Addr block_address;
+ if (!backend_->CreateBlock(BLOCK_FILES, 1, &block_address))
+ return false;
+ *address = block_address.AsExternal();
+
+ scoped_refptr<net::IOBufferWithSize> buffer(
+ new net::IOBufferWithSize(2 * sizeof(uint32)));
+ memcpy(buffer->data(), &entry_->hash, buffer->size());
+
+ backend_->WriteData(this, block_address, 0, buffer, buffer->size(),
+ CompletionCallback());
+ return true;
+}
+
+// Note that this method may end up modifying a block file so upon return the
+// involved block will be free, and could be reused for something else. If there
+// is a crash after that point (and maybe before returning to the caller), the
+// entry will be left dirty... and at some point it will be discarded; it is
+// important that the entry doesn't keep a reference to this address, or we'll
+// end up deleting the contents of |address| once again.
+void EntryImplV3::DeleteData(Addr address) {
+ DCHECK(backend_);
+ if (!address.is_initialized())
+ return;
+ backend_->Delete(this, address);
+}
+
+void EntryImplV3::UpdateRank(bool modified) {
+ if (!backend_)
+ return;
+
+ Time current = backend_->GetTime();
+ entry_->last_access_time = current.ToInternalValue();
+
+ if (modified)
+ entry_->last_modified_time = current.ToInternalValue();
+
+ if (!doomed_) {
+ backend_->UpdateRank(this, modified);
+ return;
+ }
+}
+
+bool EntryImplV3::DeleteEntryData() {
+ DCHECK(doomed_);
+ if (!backend_->ShouldDeleteNow(this))
+ return false;
+
+ if (GetEntryFlags() & PARENT_ENTRY) {
+ // We have some child entries that must go away.
+ SparseControlV3::DeleteChildren(this);
+ }
+
+ if (GetDataSize(0))
+ CACHE_UMA(COUNTS, "DeleteHeader", GetDataSize(0));
+ if (GetDataSize(1))
+ CACHE_UMA(COUNTS, "DeleteData", GetDataSize(1));
+ for (int index = 0; index < kNumStreams; index++) {
+ Addr address(entry_->data_addr[index]);
+ if (address.is_initialized()) {
+ backend_->ModifyStorageSize(entry_->data_size[index] -
+ unreported_size_[index], 0);
+ entry_->data_addr[index] = 0;
+ entry_->data_size[index] = 0;
+ dirty_ = true;
+ //entry_.Store();
+ DeleteData(address);
+ }
+ }
+
+  // Note that at this point entry_ is just a block of data, and even if it
+  // references other blocks, nobody should be referencing it.
+
+ backend_->Delete(this, address_);
+ return true;
+}
+
+// We keep a memory buffer for everything that ends up stored on a block file
+// (because we don't yet know the final data size), and for some of the data
+// that ends up in external files. This function will initialize that memory
+// buffer and / or the files needed to store the data.
+//
+// In general, a buffer may overlap data already stored on disk, and in that
+// case, the contents of the buffer are the most accurate. It may also extend
+// the file, but we don't want to read from disk just to keep the buffer up to
+// date. This means that as soon as there is a chance to get confused about what
+// is the most recent version of some part of a file, we'll flush the buffer and
+// reuse it for the new data. Keep in mind that the normal use pattern is quite
+// simple (write sequentially from the beginning), so we optimize for handling
+// that case.
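+//
+// In short:
+//   truncate           -> HandleTruncation()
+//   non-simple write   -> HandleOldData() (flush or move the old data first)
+//   otherwise          -> PrepareBuffer() (stage the write in memory)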
+int EntryImplV3::PrepareTarget(int index, int offset, int buf_len,
+ bool truncate) {
+ if (truncate)
+ return HandleTruncation(index, offset, buf_len);
+
+ if (!offset && !buf_len)
+ return net::OK;
+
+ if (!IsSimpleWrite(index, offset, buf_len))
+ return HandleOldData(index, offset, buf_len);
+
+ if (!user_buffers_[index].get())
+ user_buffers_[index].reset(new UserBuffer(backend_));
+
+ return PrepareBuffer(index, offset, buf_len);
+}
+
+// We get to this function with some data already stored. If there is a
+// truncation that results on data stored internally, we'll explicitly
+// handle the case here.
+int EntryImplV3::HandleTruncation(int index, int offset, int buf_len) {
+ Addr address(entry_->data_addr[index]);
+
+ int current_size = entry_->data_size[index];
+ int new_size = offset + buf_len;
+ DCHECK_LT(new_size, current_size);
+
+ if (!new_size) {
+ // This is by far the most common scenario.
+    // TODO: should this go through UpdateSize()?
+    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
+ entry_->data_addr[index] = 0;
+ entry_->data_size[index] = 0;
+ unreported_size_[index] = 0;
+ OnEntryModified();
+ //entry_->Store();
+ DeleteData(address);
+
+ user_buffers_[index].reset();
+ return net::OK;
+ }
+
+ // We never postpone truncating a file, if there is one, but we may postpone
+ // telling the backend about the size reduction.
+ if (user_buffers_[index].get()) {
+ DCHECK_GE(current_size, user_buffers_[index]->Start());
+ if (!address.is_initialized()) {
+ // There is no overlap between the buffer and disk.
+ if (new_size > user_buffers_[index]->Start()) {
+ // Just truncate our buffer.
+ DCHECK_LT(new_size, user_buffers_[index]->End());
+ user_buffers_[index]->Truncate(new_size);
+ return net::OK;
+ }
+
+ // Just discard our buffer.
+ user_buffers_[index].reset();
+ return PrepareBuffer(index, offset, buf_len);
+ }
+
+ // There is some overlap or we need to extend the file before the
+ // truncation.
+ if (new_size > user_buffers_[index]->Start()) {
+ if (offset > user_buffers_[index]->Start())
+ user_buffers_[index]->Truncate(new_size);
+ UpdateSize(index, current_size, new_size);
+ int rv = Flush(index, 0);
+ if (rv != net::OK)
+ return rv;
+ }
+ user_buffers_[index].reset();
+ }
+
+ // We have data somewhere, and it is not in a buffer.
+ DCHECK(!user_buffers_[index].get());
+ DCHECK(address.is_initialized());
+
+ if (!IsSimpleWrite(index, offset, buf_len))
+ return net::OK; // Let the operation go directly to disk.
+
+ if (address.is_separate_file())
+ backend_->Truncate(this, address, offset + buf_len);
+
+ if (!user_buffers_[index].get())
+ user_buffers_[index].reset(new UserBuffer(backend_));
+
+ return PrepareBuffer(index, offset, buf_len);
+}
+
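+// A write is "simple" when it can proceed without reconciling older data:
+// nothing is stored yet, the write lands inside or adjacent to the current
+// buffer, or it starts past the end of the stored data. Growing block-file
+// data past kMaxBlockSize is never simple, because the data has to move to a
+// separate file first.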
+bool EntryImplV3::IsSimpleWrite(int index, int offset, int buf_len) {
+ Addr address(entry_->data_addr[index]);
+ if (!address.is_initialized())
+ return true;
+
+  // A write that grows block-file data past kMaxBlockSize is not simple.
+  if (address.is_block_file() && (offset + buf_len > kMaxBlockSize))
+    return false;
+
+ if (!user_buffers_[index].get())
+ return true;
+
+ if ((offset >= user_buffers_[index]->Start()) &&
+ (offset <= user_buffers_[index]->End())) {
+ return true;
+ }
+
+ return offset > entry_->data_size[index];
+}
+
+int EntryImplV3::HandleOldData(int index, int offset, int buf_len) {
+ Addr address(entry_->data_addr[index]);
+ DCHECK(address.is_initialized());
+
+  // The data outgrows its block file, so it has to be moved or discarded.
+  if (address.is_block_file() && (offset + buf_len > kMaxBlockSize)) {
+ if (!GetAdjustedSize(index, offset) || !GetDataSize(index)) {
+ // There's nothing to save from the old data.
+ user_buffers_[index].reset();
+ DCHECK(!user_buffers_[kKeyIndex]);
+ DeleteData(address);
+ entry_->data_addr[kKeyIndex] = 0;
+ entry_->data_size[kKeyIndex] = 0;
+ WriteKey();
+ return PrepareBuffer(index, offset, buf_len);
+ }
+ // We have to move the data to a new file.
+ Addr new_address;
+ if (!CreateBlock(kMaxBlockSize * 2, &new_address))
+ return net::ERR_FAILED;
+
+ backend_->MoveData(this, address, new_address, entry_->data_size[index],
+ callback_);
+ entry_->data_addr[index] = new_address.value();
+ return net::ERR_IO_PENDING;
+ }
+
+ int rv = Flush(index, 0);
+ if (rv != net::OK)
+ return rv;
+
+ user_buffers_[index].reset(); // Don't use a local buffer.
+ return net::OK;
+}
+
+int EntryImplV3::PrepareBuffer(int index, int offset, int buf_len) {
+ DCHECK(user_buffers_[index].get());
+ int rv = net::OK;
+ if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
+ offset > entry_->data_size[index]) {
+ // We are about to extend the buffer or the file (with zeros), so make sure
+ // that we are not overwriting anything.
+ Addr address(entry_->data_addr[index]);
+ if (address.is_initialized() && address.is_separate_file()) {
+ rv = Flush(index, 0);
+ if (rv != net::OK)
+ return rv;
+ // There is an actual file already, and we don't want to keep track of
+ // its length so we let this operation go straight to disk.
+ // The only case when a buffer is allowed to extend the file (as in fill
+ // with zeros before the start) is when there is no file yet to extend.
+ user_buffers_[index].reset();
+ return rv;
+ }
+ }
+
+ if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
+ rv = Flush(index, offset + buf_len);
+ if (rv != net::OK)
+ return rv;
+
+    // Let's try again.
+ if (offset > user_buffers_[index]->End() ||
+ !user_buffers_[index]->PreWrite(offset, buf_len)) {
+ // We cannot complete the operation with a buffer.
+ DCHECK(!user_buffers_[index]->Size());
+ DCHECK(!user_buffers_[index]->Start());
+ user_buffers_[index].reset();
+ }
+ }
+ return rv;
+}
+
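+// Writes any locally buffered data for |index| to disk, allocating the target
+// block or file first when needed. |min_len| lets the caller reserve room
+// beyond the currently recorded size. Returns net::ERR_IO_PENDING when an
+// asynchronous backend write was issued, and net::OK when there was nothing
+// to flush.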
+int EntryImplV3::Flush(int index, int min_len) {
+ Addr address(entry_->data_addr[index]);
+ if (!user_buffers_[index].get())
+ return net::OK;
+
+ //DCHECK(!address.is_initialized() || address.is_separate_file());
+ DVLOG(3) << "Flush";
+
+ int size = std::max(entry_->data_size[index], min_len);
+ if (size && !address.is_initialized() && !CreateDataBlock(index, size))
+ return net::ERR_FAILED;
+
+ if (!entry_->data_size[index]) {
+ DCHECK(!user_buffers_[index]->Size());
+ return net::OK;
+ }
+
+ address.set_value(entry_->data_addr[index]);
+
+ int len = user_buffers_[index]->Size();
+ int offset = user_buffers_[index]->Start();
+ if (!len && !offset)
+ return net::OK;
+
+ if (!len) {
+ if (address.is_separate_file()) {
+ backend_->Truncate(this, address, offset);
+ return net::OK;
+ }
+ user_buffers_[index]->Rebase();
+ len = offset;
+ offset = 0;
+ }
+
+ backend_->WriteData(this, address, offset, user_buffers_[index]->Get(),
+ len, callback_);
+ user_buffers_[index].reset();
+ return net::ERR_IO_PENDING;
+}
+
+void EntryImplV3::UpdateSize(int index, int old_size, int new_size) {
+ if (entry_->data_size[index] == new_size)
+ return;
+
+ unreported_size_[index] += new_size - old_size;
+ entry_->data_size[index] = new_size;
+ OnEntryModified();
+}
+
+void EntryImplV3::WriteEntryData() {
+ CacheEntryBlockV3 entry_block;
+ entry_block.SetData(entry_.get());
+ entry_block.UpdateHash();
+
+ scoped_refptr<net::IOBufferWithSize> buffer(
+ new net::IOBufferWithSize(sizeof(EntryRecord)));
+ memcpy(buffer->data(), entry_.get(), buffer->size());
+
+ backend_->WriteData(this, address_, 0, buffer, buffer->size(),
+ CompletionCallback());
+}
+
+void EntryImplV3::SetEntryFlags(uint32 flags) {
+ entry_->flags |= flags;
+ dirty_ = true;
+}
+
+uint32 EntryImplV3::GetEntryFlags() {
+ return entry_->flags;
+}
+
+void EntryImplV3::OnEntryModified() {
+ if (modified_)
+ return;
+ DCHECK(!read_only_);
+ dirty_ = true;
+ modified_ = true;
+ if (backend_)
+ backend_->OnEntryModified(this);
+}
+
+void EntryImplV3::GetData(int index, scoped_refptr<IOBuffer>* buffer, Addr* address) {
+ DCHECK(backend_);
+ if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
+ !user_buffers_[index]->Start()) {
+ // The data is already in memory, just copy it and we're done.
+ int data_len = entry_->data_size[index];
+ if (data_len <= user_buffers_[index]->Size()) {
+ DCHECK(!user_buffers_[index]->Start());
+ *buffer = user_buffers_[index]->Get();
+ return;
+ }
+ }
+
+ // Bad news: we'd have to read the info from disk so instead we'll just tell
+ // the caller where to read from.
+ *buffer = NULL;
+ address->set_value(entry_->data_addr[index]);
+ if (address->is_initialized()) {
+ // Prevent us from deleting the block from the backing store.
+ backend_->ModifyStorageSize(entry_->data_size[index] -
+ unreported_size_[index], 0);
+ entry_->data_addr[index] = 0;
+ entry_->data_size[index] = 0;
+ }
+}
+
+void EntryImplV3::ReportIOTime(Operation op, const base::TimeTicks& start) {
+ if (!backend_)
+ return;
+
+ switch (op) {
+ case kRead:
+ CACHE_UMA(AGE_MS, "ReadTime", start);
+ break;
+ case kWrite:
+ CACHE_UMA(AGE_MS, "WriteTime", start);
+ break;
+ case kSparseRead:
+ CACHE_UMA(AGE_MS, "SparseReadTime", start);
+ break;
+ case kSparseWrite:
+ CACHE_UMA(AGE_MS, "SparseWriteTime", start);
+ break;
+ case kAsyncIO:
+ CACHE_UMA(AGE_MS, "AsyncIOTime", start);
+ break;
+ case kReadAsync1:
+ CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", start);
+ break;
+ case kWriteAsync1:
+ CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", start);
+ break;
+ default:
+ NOTREACHED();
+ }
+}
+
+void EntryImplV3::Log(const char* msg) {
+ Trace("%s 0x%p 0x%x", msg, reinterpret_cast<void*>(this), address_);
+ Trace(" data: 0x%x 0x%x", entry_->data_addr[0], entry_->data_addr[1]);
+ Trace(" doomed: %d", doomed_);
+}
+
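+// Drives the pending-operation queue: each backend IO completion either
+// finishes the operation at the front of the queue (running its callback) or
+// advances it to its next state, e.g. PENDING_FLUSH becomes PENDING_WRITE
+// once the flush that made room for the write has completed.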
+void EntryImplV3::OnIOComplete(int result) {
+ DCHECK_NE(result, net::ERR_IO_PENDING);
+ DCHECK(!pending_operations_.empty());
+ while (result != net::ERR_IO_PENDING) {
+ bool finished = false;
+ PendingOperation& next = pending_operations_.front();
+ switch (next.action) {
+ case PENDING_FLUSH:
+ if (result < 0)
+ finished = true;
+ next.action = PENDING_WRITE;
+ break;
+ case PENDING_READ:
+ result = ReadDataImpl(next.index, next.offset, next.buf,
+ next.buf_len, &next, callback_);
+ if (result != net::ERR_IO_PENDING)
+ finished = true;
+ break;
+ case PENDING_WRITE:
+ result = WriteDataImpl(next.index, next.offset, next.buf,
+ next.buf_len, &next, callback_,
+ next.truncate);
+ if (result != net::ERR_IO_PENDING)
+ finished = true;
+ break;
+ case PENDING_CLEANUP:
+ Cleanup();
+ finished = true;
+ break;
+ case PENDING_DONE:
+ finished = true;
+ break;
+ default: NOTREACHED();
+ }
+ if (finished) {
+ next.buf = NULL;
+ if (!next.callback.is_null())
+ next.callback.Run(result);
+ pending_operations_.pop();
+
+ if (pending_operations_.empty()) {
+ if (dirty_ && HasOneRef()) {
+ // One of the pending operations modified this entry after the last
+ // Close... issue an extra cleanup.
+ Cleanup();
+ }
+ break;
+ }
+
+ // Cleanup may issue multiple flushes so there may be multiple pending
+ // callbacks already in flight. Make sure we wait for them.
+      // |next| was invalidated by pop() above, so peek at the queue again.
+      const PendingOperation& peek = pending_operations_.front();
+      DCHECK_NE(peek.action, PENDING_FLUSH);
+      if (peek.action == PENDING_DONE)
+        break;
+ }
+ }
+}
+
+} // namespace disk_cache
Property changes on: net\disk_cache\v3\entry_impl_v3.cc
___________________________________________________________________
Added: svn:eol-style
+ LF