Chromium Code Reviews

Unified Diff: net/http/http_cache.cc

Issue 455623003: stale-while-revalidate experimental implementation. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: GetFreshnessLifetimes now returns two times. Duplicate AsyncValidations for the same URL are discar… Created 6 years, 3 months ago
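Editor's note: the GetFreshnessLifetimes() change mentioned in the patch set description lives in net/http/http_response_headers.{h,cc} and is not part of this file's diff. As context only, a sketch of the two-value return written from memory of the API that eventually landed; treat the exact names as an assumption:

  // net/http/http_response_headers.h (sketch, not part of this diff).
  struct FreshnessLifetimes {
    // How long the resource is fresh for.
    base::TimeDelta freshness;
    // How long past expiry the resource may still be served stale while an
    // asynchronous revalidation runs.
    base::TimeDelta staleness;
  };

  FreshnessLifetimes GetFreshnessLifetimes(const base::Time& response_time) const;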
Index: net/http/http_cache.cc
diff --git a/net/http/http_cache.cc b/net/http/http_cache.cc
index bfb21ade1d1edcbba4264b3837cefdc25ac2c9fb..7388e288f0e83c369708a19d4474bb4602c6f215 100644
--- a/net/http/http_cache.cc
+++ b/net/http/http_cache.cc
@@ -21,16 +21,19 @@
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
#include "base/pickle.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/worker_pool.h"
+#include "base/time/time.h"
#include "net/base/cache_type.h"
#include "net/base/io_buffer.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
+#include "net/base/network_delegate.h"
#include "net/base/upload_data_stream.h"
#include "net/disk_cache/disk_cache.h"
#include "net/http/disk_based_cert_cache.h"
@@ -292,12 +295,145 @@ class HttpCache::QuicServerInfoFactoryAdaptor : public QuicServerInfoFactory {
};
//-----------------------------------------------------------------------------
+
+class HttpCache::AsyncValidation {
+ public:
+ AsyncValidation(const HttpRequestInfo& original_request, HttpCache* cache)
+ : request_(original_request), cache_(cache) {}
+ ~AsyncValidation() {}
+
+ void Start(const BoundNetLog& net_log, NetworkDelegate* network_delegate);
+
+ private:
+ void OnStarted(int result);
+ void DoRead();
+ void OnRead(int result);
+
+ // Terminate this request with net error code |result|. Logs the transaction
+ // result and asks HttpCache to delete this object.
+ // If there was a client or server certificate error, it cannot be recovered
+ // asynchronously, so we need to prevent future attempts to asynchronously
+ // fetch the resource. In this case, the cache entry is doomed.
+ void Terminate(int result);
+
+ HttpRequestInfo request_;
+ scoped_refptr<IOBuffer> buf_;
+ CompletionCallback read_callback_;
+ scoped_ptr<Transaction> transaction_;
+ base::Time start_time_;
+
+ // The HttpCache object owns this object. This object is always deleted before
+ // the pointer to the cache becomes invalid.
+ HttpCache* cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncValidation);
+};
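Editor's note: the matching declarations in net/http/http_cache.h are not shown in this file's diff. Judging from the names used below (AsyncValidationMap, async_validations_, PerformAsyncValidation, DeleteAsyncValidation), the header side presumably looks roughly like this sketch; the exact form is an assumption:

  // Inside class HttpCache (net/http/http_cache.h), sketch only.
  class AsyncValidation;  // Drives one background revalidation to completion.

  // Starts a background revalidation of |original_request|, unless one is
  // already in flight for the same URL.
  void PerformAsyncValidation(const HttpRequestInfo& original_request,
                              const BoundNetLog& net_log);

  // Removes and deletes the AsyncValidation registered for |url|.
  void DeleteAsyncValidation(const std::string& url);

  // In-flight background revalidations, keyed by URL spec.
  typedef std::map<std::string, AsyncValidation*> AsyncValidationMap;
  AsyncValidationMap async_validations_;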
+
+void HttpCache::AsyncValidation::Start(const BoundNetLog& net_log,
+ NetworkDelegate* network_delegate) {
+ scoped_ptr<HttpTransaction> transaction;
+ cache_->CreateTransaction(IDLE, &transaction);
+ if (network_delegate) {
+ // This code is necessary to enable async transactions to pass over the
+ // data-reduction proxy. This is a violation of the "once-and-only-once"
+ // principle, since it copies code from URLRequestHttpJob. We cannot use the
+ // original callback passed to HttpCache::Transaction by URLRequestHttpJob
+ // as it will only be valid as long as the URLRequestHttpJob object is
+ // alive, and that object will be deleted as soon as the synchronous request
+ // completes.
+ //
+ // This code is also an encapsulation violation. We are exploiting the fact
+ // that the |request| parameter to NotifyBeforeSendProxyHeaders() is never
+ // actually used for anything, and so can be NULL.
+ //
+ // TODO(ricea): Do this better.
+ transaction->SetBeforeProxyHeadersSentCallback(
+ base::Bind(&NetworkDelegate::NotifyBeforeSendProxyHeaders,
+ base::Unretained(network_delegate),
+ static_cast<URLRequest*>(NULL)));
+ // The above use of base::Unretained is safe because the NetworkDelegate has
+ // to live at least as long as the HttpNetworkSession, which has to live at
+ // least as long as the HttpNetworkLayer, which has to live at least as long
+ // as this HttpCache object.
+ }
+
+ transaction_.reset(static_cast<Transaction*>(transaction.release()));
+ DCHECK_EQ(0, request_.load_flags & LOAD_ASYNC_REVALIDATION);
+ request_.load_flags |= LOAD_ASYNC_REVALIDATION;
+ start_time_ = base::Time::Now();
+ // This use of base::Unretained is safe because |transaction_| is owned by
+ // this object.
+ read_callback_ = base::Bind(&AsyncValidation::OnRead, base::Unretained(this));
+ // This use of base::Unretained is safe for the same reason as above.
+ int rv = transaction_->Start(
+ &request_,
+ base::Bind(&AsyncValidation::OnStarted, base::Unretained(this)),
+ net_log);
+
+ if (rv == ERR_IO_PENDING)
+ return;
+
+ OnStarted(rv);
+}
+
+void HttpCache::AsyncValidation::OnStarted(int result) {
+ if (result != OK) {
+ DVLOG(1) << "Asynchronous transaction start failed for " << request_.url;
+ Terminate(result);
+ return;
+ }
+
+ DoRead();
+}
+
+void HttpCache::AsyncValidation::DoRead() {
+ const size_t kBufSize = 4096;
+ if (!buf_)
+ buf_ = new IOBuffer(kBufSize);
+
+ int rv = 0;
+ do {
+ rv = transaction_->Read(buf_.get(), kBufSize, read_callback_);
+ } while (rv > 0);
+
+ if (rv == ERR_IO_PENDING)
+ return;
+
+ OnRead(rv);
+}
+
+void HttpCache::AsyncValidation::OnRead(int result) {
+ if (result > 0) {
+ DoRead();
+ return;
+ }
+ Terminate(result);
+}
+
+void HttpCache::AsyncValidation::Terminate(int result) {
+ if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED || IsCertificateError(result)) {
+ // We should not attempt to access this resource asynchronously again until
+ // the certificate problem has been resolved.
+ // TODO(ricea): Mark the entry as requiring synchronous revalidation rather
+ // than just deleting it.
+ cache_->DoomEntry(transaction_->key(), transaction_.get());
+ }
+ base::TimeDelta duration = base::Time::Now() - start_time_;
+ UMA_HISTOGRAM_TIMES("HttpCache.AsyncValidationDuration", duration);
+ transaction_->net_log().EndEventWithNetErrorCode(
+ NetLog::TYPE_ASYNC_REVALIDATION, result);
+ cache_->DeleteAsyncValidation(request_.url.possibly_invalid_spec());
rvargas (doing something else) 2014/09/11 02:33:06 We should rely one way or another on GenerateCache
Adam Rice 2014/09/12 01:46:48 Done.
+ // |this| is deleted.
+}
+
+//-----------------------------------------------------------------------------
HttpCache::HttpCache(const net::HttpNetworkSession::Params& params,
BackendFactory* backend_factory)
: net_log_(params.net_log),
backend_factory_(backend_factory),
building_backend_(false),
bypass_lock_for_test_(false),
+ use_stale_while_revalidate_(params.use_stale_while_revalidate),
mode_(NORMAL),
network_layer_(new HttpNetworkLayer(new HttpNetworkSession(params))),
weak_factory_(this) {
@@ -313,6 +449,7 @@ HttpCache::HttpCache(HttpNetworkSession* session,
backend_factory_(backend_factory),
building_backend_(false),
bypass_lock_for_test_(false),
+ use_stale_while_revalidate_(session->params().use_stale_while_revalidate),
mode_(NORMAL),
network_layer_(new HttpNetworkLayer(session)),
weak_factory_(this) {
@@ -325,10 +462,14 @@ HttpCache::HttpCache(HttpTransactionFactory* network_layer,
backend_factory_(backend_factory),
building_backend_(false),
bypass_lock_for_test_(false),
+ use_stale_while_revalidate_(false),
mode_(NORMAL),
network_layer_(network_layer),
weak_factory_(this) {
SetupQuicServerInfoFactory(network_layer_->GetSession());
+ HttpNetworkSession* session = network_layer_->GetSession();
+ if (session)
+ use_stale_while_revalidate_ = session->params().use_stale_while_revalidate;
}
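Editor's note: for context, a minimal usage sketch (not part of this patch) of how an embedder would opt in, assuming it constructs the session parameters itself. Only the use_stale_while_revalidate field comes from this diff; the rest is illustrative:

  // Sketch only: real callers also populate host resolver, cert verifier,
  // proxy service, etc. before building the session.
  net::HttpNetworkSession::Params params;
  params.use_stale_while_revalidate = true;  // opt in to async revalidation
  net::HttpCache cache(
      params, net::HttpCache::DefaultBackend::InMemory(10 * 1024 * 1024));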
HttpCache::~HttpCache() {
@@ -350,6 +491,7 @@ HttpCache::~HttpCache() {
}
STLDeleteElements(&doomed_entries_);
+ STLDeleteValues(&async_validations_);
// Before deleting pending_ops_, we have to make sure that the disk cache is
// done with said operations, or it will attempt to use deleted data.
@@ -1038,6 +1180,38 @@ void HttpCache::ProcessPendingQueue(ActiveEntry* entry) {
base::Bind(&HttpCache::OnProcessPendingQueue, GetWeakPtr(), entry));
}
+void HttpCache::PerformAsyncValidation(const HttpRequestInfo& original_request,
+ const BoundNetLog& net_log) {
+ DCHECK(use_stale_while_revalidate_);
+ std::string spec = original_request.url.possibly_invalid_spec();
+ if (ContainsKey(async_validations_, spec)) {
rvargas (doing something else) 2014/09/11 02:33:06 Avoid searching twice by using insert_ok to detect
Adam Rice 2014/09/12 01:46:48 Sorry, yes. I optimised for the conflict case with
+ DVLOG(1) << "Harmless race condition detected on URL " << spec
+ << "; discarding redundant revalidation.";
+ return;
+ }
+ AsyncValidation* async_validation =
+ new AsyncValidation(original_request, this);
+ typedef AsyncValidationMap::value_type AsyncValidationKeyValue;
+ bool insert_ok =
+ async_validations_.insert(AsyncValidationKeyValue(spec, async_validation))
+ .second;
+ DCHECK(insert_ok);
+ HttpNetworkSession* network_session = GetSession();
+ NetworkDelegate* network_delegate = NULL;
+ if (network_session)
+ network_delegate = network_session->network_delegate();
+ async_validation->Start(net_log, network_delegate);
+ // |async_validation| may have been deleted here.
+}
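Editor's note: a minimal sketch of the single-lookup variant rvargas suggests above, keying off the bool returned by std::map::insert() instead of the separate ContainsKey() check. Illustrative only, not the code that landed; note it constructs the AsyncValidation even when it turns out to be redundant:

  AsyncValidation* async_validation =
      new AsyncValidation(original_request, this);
  std::pair<AsyncValidationMap::iterator, bool> result =
      async_validations_.insert(std::make_pair(spec, async_validation));
  if (!result.second) {
    // A revalidation for this URL is already in flight; drop the new one.
    DVLOG(1) << "Harmless race condition detected on URL " << spec
             << "; discarding redundant revalidation.";
    delete async_validation;
    return;
  }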
+
+void HttpCache::DeleteAsyncValidation(const std::string& url) {
+ AsyncValidationMap::iterator it = async_validations_.find(url);
+ // Every AsyncValidation is registered in PerformAsyncValidation() before it
+ // can call back into DeleteAsyncValidation().
+ CHECK(it != async_validations_.end());
+ AsyncValidation* async_validation = it->second;
+ async_validations_.erase(it);
+ // Note: |url| may refer to a string owned by |async_validation| (as it does
+ // when called from Terminate()), so it must not be used after this point.
+ delete async_validation;
rvargas (doing something else) 2014/09/11 02:33:06 Is the comment really needed? If so, it needs reph
Adam Rice 2014/09/12 01:46:48 Probably not. I thought it was worth noting as non
+}
+
void HttpCache::OnProcessPendingQueue(ActiveEntry* entry) {
entry->will_process_pending_queue = false;
DCHECK(!entry->writer);
