Index: net/disk_cache/pending_create_entry.cc
diff --git a/net/disk_cache/pending_create_entry.cc b/net/disk_cache/pending_create_entry.cc
new file mode 100644
index 0000000000000000000000000000000000000000..233b13f0bc553ff9dd7a966823490bebc2b645b7
--- /dev/null
+++ b/net/disk_cache/pending_create_entry.cc
@@ -0,0 +1,278 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/pending_create_entry.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/time.h"
+#include "net/base/completion_callback.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace disk_cache {
+
+namespace {
+
+// A PendingEntry takes the place of our yet-to-be-created Entry while a
+// PendingCreateEntry is in flight. After a successful create, the new Entry
+// takes its place. The PendingEntry sets the output parameter |failed| to
+// true if it receives any operation we cannot proxy to the new entry.
+class PendingEntry : public Entry {
+ public:
+  PendingEntry(const std::string& key, bool* failed)
+      : key_(key), failed_(failed) {
+    *failed_ = false;
+  }
+  virtual ~PendingEntry() {
+  }
+
+  // From disk_cache::Entry:
+  virtual void Close() OVERRIDE {
+    // We do not self-delete here because, as a member of ProxyEntry, we will
+    // be deleted along with it.
+  }
+  virtual void Doom() OVERRIDE {
+  }
+  virtual int WriteData(int index,
+                        int offset,
+                        IOBuffer* buf,
+                        int buf_len,
+                        const CompletionCallback& callback,
+                        bool truncate) OVERRIDE {
+    VLOG(0) << "NETWORK won for " << key_;
+    // If we receive WriteData, that's because the create on the disk cache did
+    // not complete in time to receive data. We fail, which will force
+    // transactions into pass through mode.
+    // TODO(gavinp): Investigate the benefit of buffering here.
+    *failed_ = true;
+    return net::ERR_CACHE_CREATE_RACE_FAILED;
+  }
+  virtual std::string GetKey() const OVERRIDE {
+    return key_;
+  }
+
+  // The rest of these operations aren't expected to ever be called, but are
+  // required for a complete implementation of the abstract base class.
+  virtual base::Time GetLastUsed() const OVERRIDE {
+    NOTREACHED();
+    return base::Time();
+  }
+  virtual base::Time GetLastModified() const OVERRIDE {
+    NOTREACHED();
+    return base::Time();
+  }
+  virtual int32 GetDataSize(int index) const OVERRIDE {
+    NOTREACHED();
+    return 0;
+  }
+  virtual int ReadData(int index,
+                       int offset,
+                       IOBuffer* buf,
+                       int buf_len,
+                       const CompletionCallback& callback) OVERRIDE {
+    NOTREACHED();
+    return net::ERR_CACHE_CREATE_RACE_FAILED;
+  }
+  virtual int ReadSparseData(int64 offset,
+                             IOBuffer* buf,
+                             int buf_len,
+                             const CompletionCallback& callback) OVERRIDE {
+    NOTREACHED();
+    return net::ERR_CACHE_CREATE_RACE_FAILED;
+  }
+  virtual int WriteSparseData(int64 offset,
+                              IOBuffer* buf,
+                              int buf_len,
+                              const CompletionCallback& callback) OVERRIDE {
+    NOTREACHED();
+    return net::ERR_CACHE_CREATE_RACE_FAILED;
+  }
+  virtual int GetAvailableRange(int64 offset,
+                                int len,
+                                int64* start,
+                                const CompletionCallback& callback) OVERRIDE {
+    NOTREACHED();
+    return net::ERR_CACHE_CREATE_RACE_FAILED;
+  }
+  virtual bool CouldBeSparse() const OVERRIDE {
+    NOTREACHED();
+    return false;
+  }
+  virtual void CancelSparseIO() OVERRIDE {
+    NOTREACHED();
+  }
+  virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE {
+    return net::ERR_CACHE_CREATE_RACE_FAILED;
+  }
+
+ private:
+  std::string key_;
+
+  // |failed_| is set to true iff we receive an operation we cannot proxy.
+  bool* failed_;
+};
+
+// The ProxyEntry initially proxies for a PendingEntry; once our CreateEntry
+// completes, it swaps the newly created entry into place if possible.
+class ProxyEntry : public Entry {
+ public:
+  explicit ProxyEntry(const std::string& key);
+  virtual ~ProxyEntry();
+
+  // Hands ownership of the synchronously created entry (if any) to the caller.
+  Entry* ReleaseCreatedEntry();
+
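+  // Handed to Backend::CreateEntry(): the backend writes the new entry into
+  // |to_create_entry_| and invokes |io_callback_| when the create completes.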
+  Entry** to_create_entry_ptr() { return &to_create_entry_; }
+  const net::CompletionCallback& io_callback() const { return io_callback_; }
+
+  // Entry interface.
+  virtual void Doom() OVERRIDE {
+    proxy_entry_->Doom();
+  }
+  virtual void Close() OVERRIDE {
+    proxy_entry_->Close();
+    proxy_entry_ = NULL;
+    // Normally, for entries created uniquely for each caller, we would
+    // self-delete here. However, |to_create_entry_| is still referenced if we
+    // have not received |io_callback_| yet, so we must wait for the callback
+    // before self-deleting. Note that this isn't solved by using a WeakPtr in
+    // the callback, since |to_create_entry_| is set on the cache thread
+    // outside of the callback.
+    if (io_callback_was_received_)
+      delete this;
+  }
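+
+  // The remaining Entry methods forward to whichever entry we are currently
+  // proxying for: the PendingEntry before the create completes, or the real
+  // backend entry after a successful swap in OnIOComplete().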
+  virtual std::string GetKey() const OVERRIDE {
+    return proxy_entry_->GetKey();
+  }
+  virtual base::Time GetLastUsed() const OVERRIDE {
+    return proxy_entry_->GetLastUsed();
+  }
+  virtual base::Time GetLastModified() const OVERRIDE {
+    return proxy_entry_->GetLastModified();
+  }
+  virtual int32 GetDataSize(int index) const OVERRIDE {
+    return proxy_entry_->GetDataSize(index);
+  }
+  virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
+                       const CompletionCallback& callback) OVERRIDE {
+    return proxy_entry_->ReadData(index, offset, buf, buf_len, callback);
+  }
+  virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
+                        const CompletionCallback& callback,
+                        bool truncate) OVERRIDE {
+    return proxy_entry_->WriteData(index, offset, buf, buf_len, callback,
+                                   truncate);
+  }
+  virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
+                             const CompletionCallback& callback) OVERRIDE {
+    return proxy_entry_->ReadSparseData(offset, buf, buf_len, callback);
+  }
+  virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
+                              const CompletionCallback& callback) OVERRIDE {
+    return proxy_entry_->WriteSparseData(offset, buf, buf_len, callback);
+  }
+  virtual int GetAvailableRange(int64 offset, int len, int64* start,
+                                const CompletionCallback& callback) OVERRIDE {
+    return proxy_entry_->GetAvailableRange(offset, len, start, callback);
+  }
+  virtual bool CouldBeSparse() const OVERRIDE {
+    return proxy_entry_->CouldBeSparse();
+  }
+  virtual void CancelSparseIO() OVERRIDE {
+    proxy_entry_->CancelSparseIO();
+  }
+  virtual int ReadyForSparseIO(const CompletionCallback& callback) OVERRIDE {
+    return proxy_entry_->ReadyForSparseIO(callback);
+  }
+
+ private:
+  void OnIOComplete(int result);
+
+  std::string key_;
+
+  // Initially false, |pending_entry_invalid_| is set to true if the
+  // |pending_entry_| received an operation it could not proxy.
+  bool pending_entry_invalid_;
+
+  // Until our entry has been created, we proxy to |pending_entry_|.
+  PendingEntry pending_entry_;
+
+  // Set to true once |io_callback_| has been run.
+  bool io_callback_was_received_;
+
+  // When our IO callback is called, |to_create_entry_| will contain our newly
+  // created entry.
+  Entry* to_create_entry_;
+
+  // The entry that we are currently a proxy for.
+  Entry* proxy_entry_;
+
+  net::CompletionCallback io_callback_;
+};
+
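+// base::Unretained(this) is safe for |io_callback_|: the callback only runs
+// while a backend CreateEntry is pending, and a ProxyEntry with a pending
+// create is never deleted before OnIOComplete() runs (Close() defers the
+// self-delete until the callback has been received).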
+ProxyEntry::ProxyEntry(const std::string& key)
+    : key_(key),
+      pending_entry_invalid_(false),
+      pending_entry_(key, &pending_entry_invalid_),
+      io_callback_was_received_(false),
+      to_create_entry_(NULL),
+      proxy_entry_(&pending_entry_),
+      ALLOW_THIS_IN_INITIALIZER_LIST(io_callback_(
+          base::Bind(&ProxyEntry::OnIOComplete, base::Unretained(this)))) {
+}
+
+ProxyEntry::~ProxyEntry() {
+}
+
+Entry* ProxyEntry::ReleaseCreatedEntry() {
+  Entry* to_return = to_create_entry_;
+  to_create_entry_ = NULL;
+  return to_return;
+}
+
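+// Invoked by the cache backend once the pending CreateEntry finishes;
+// |result| is a net error code (net::OK on success).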
+void ProxyEntry::OnIOComplete(int result) {
+  DCHECK(!io_callback_was_received_);
+  io_callback_was_received_ = true;
+  if (!proxy_entry_) {
+    // We are receiving an IO callback after we have already been closed.
+    // Since |to_create_entry_| is no longer referenced by our pending Create
+    // operation, we can now perform the self-delete delayed from Close().
+    VLOG(0) << "Cleaning up after our entry...";
+    if (result == net::OK && to_create_entry_) {
+      to_create_entry_->Doom();
+      to_create_entry_->Close();
+      to_create_entry_ = NULL;
+    }
+    delete this;
+    return;
+  }
+  VLOG(0) << "CACHE WON for " << key_;
+  if (result == net::OK) {
+    if (pending_entry_invalid_) {
+      to_create_entry_->Doom();
+      to_create_entry_->Close();
+      to_create_entry_ = NULL;
+      return;
+    }
+    // We have successfully created our entry before any invalid operations
+    // occurred. We can swap it in and become a pure proxy.
+    proxy_entry_ = to_create_entry_;
+    to_create_entry_ = NULL;
+  }
+}
+
+}  // namespace
+
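+// Starts an entry creation on |backend|. On net::OK, |*entry| is usable
+// immediately, even if the backend create is still pending behind a proxy.
+// Sketch of a hypothetical caller (|backend| and the key are illustrative):
+//
+//   disk_cache::Entry* entry = NULL;
+//   int rv = disk_cache::PendingCreateEntry(backend, "http://a.com/", &entry);
+//   if (rv == net::OK) {
+//     // Writes that arrive before the backend create has finished fail with
+//     // net::ERR_CACHE_CREATE_RACE_FAILED; later writes go to the real entry.
+//     entry->Close();
+//   }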
+int PendingCreateEntry(Backend* backend,
+                       const std::string& key,
+                       Entry** entry) {
+  VLOG(0) << "PendingCreateEntry";
+  ProxyEntry* proxy_entry = new ProxyEntry(key);
+  int rv = backend->CreateEntry(key, proxy_entry->to_create_entry_ptr(),
+                                proxy_entry->io_callback());
+  VLOG(0) << "rv = " << rv;
+  if (rv == net::ERR_IO_PENDING) {
+    *entry = proxy_entry;
+    return net::OK;
+  }
+  // The create completed synchronously, so no callback will arrive and the
+  // proxy is not needed; hand back the real entry (if any) and delete the
+  // proxy to avoid leaking it.
+  if (rv == net::OK)
+    *entry = proxy_entry->ReleaseCreatedEntry();
+  delete proxy_entry;
+  return rv;
+}
+
+}  // namespace disk_cache