Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(179)

Unified Diff: net/http/http_cache_transaction.cc

Issue 2519473002: Fixes the cache lock issue. (Closed)
Patch Set: Feedback addressed Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « net/http/http_cache_transaction.h ('k') | net/http/http_cache_unittest.cc » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: net/http/http_cache_transaction.cc
diff --git a/net/http/http_cache_transaction.cc b/net/http/http_cache_transaction.cc
index 24002d75215d56bcc5081c22af3d4369806159e8..0badc978179ab719b48efb2bcba4b15f7ee5fe6e 100644
--- a/net/http/http_cache_transaction.cc
+++ b/net/http/http_cache_transaction.cc
@@ -11,7 +11,9 @@
#endif
#include <algorithm>
+#include <limits>
#include <string>
+#include <utility>
#include "base/bind.h"
#include "base/callback_helpers.h"
@@ -38,6 +40,7 @@
#include "net/cert/cert_status_flags.h"
#include "net/cert/x509_certificate.h"
#include "net/disk_cache/disk_cache.h"
+#include "net/http/http_cache_shared_writers.h"
#include "net/http/http_network_session.h"
#include "net/http/http_request_info.h"
#include "net/http/http_util.h"
@@ -181,6 +184,9 @@ HttpCache::Transaction::Transaction(RequestPriority priority, HttpCache* cache)
couldnt_conditionalize_request_(false),
bypass_lock_for_test_(false),
fail_conditionalization_for_test_(false),
+ shared_(false),
+ initiate_shared_writing_(false),
+ shared_read_write_failure_result_(0),
io_buf_len_(0),
read_offset_(0),
effective_load_flags_(0),
@@ -190,6 +196,7 @@ HttpCache::Transaction::Transaction(RequestPriority priority, HttpCache* cache)
total_received_bytes_(0),
total_sent_bytes_(0),
websocket_handshake_stream_base_create_helper_(NULL),
+ have_full_request_headers_(false),
weak_factory_(this) {
TRACE_EVENT0("io", "HttpCacheTransaction::Transaction");
static_assert(HttpCache::Transaction::kNumValidationHeaders ==
@@ -202,10 +209,17 @@ HttpCache::Transaction::Transaction(RequestPriority priority, HttpCache* cache)
HttpCache::Transaction::~Transaction() {
TRACE_EVENT0("io", "HttpCacheTransaction::~Transaction");
+
// We may have to issue another IO, but we should never invoke the callback_
// after this point.
callback_.Reset();
+ if (shared_) {
+ // Remove transaction from shared writers and do any cleanup, if needed, for
+ // entry_->shared_writers and entry_.
+ RemoveTransactionFromSharedWriters();
+ }
+
if (cache_) {
if (entry_) {
bool cancel_request = reading_ && response_.headers.get();
@@ -216,7 +230,6 @@ HttpCache::Transaction::~Transaction() {
cancel_request &= (response_.headers->response_code() == 200);
}
}
-
cache_->DoneWithEntry(entry_, this, cancel_request);
} else if (cache_pending_) {
cache_->RemovePendingTransaction(this);
@@ -224,6 +237,59 @@ HttpCache::Transaction::~Transaction() {
}
}
+void HttpCache::Transaction::RemoveTransactionFromSharedWriters() {
+ switch (next_state_) {
+ case STATE_GET_BACKEND_COMPLETE:
+ case STATE_OPEN_ENTRY_COMPLETE:
+ case STATE_DOOM_ENTRY_COMPLETE:
+ case STATE_CREATE_ENTRY_COMPLETE:
+ case STATE_ADD_TO_ENTRY_COMPLETE:
+ // Pending transaction not yet added to an entry, so entry_ is null, let
+ // cache_ handle the deletion.
+ cache_->RemovePendingTransaction(this);
+ // Set cache_pending_ to false so that more cleanup is not attempted in
+ // the destructor.
+ cache_pending_ = false;
+ break;
+ case STATE_CACHE_READ_RESPONSE_COMPLETE:
+ case STATE_TOGGLE_UNUSED_SINCE_PREFETCH_COMPLETE:
+ case STATE_CACHE_QUERY_DATA_COMPLETE:
+ case STATE_COMPLETE_PARTIAL_CACHE_VALIDATION:
+ case STATE_SEND_REQUEST_COMPLETE:
+ case STATE_CACHE_WRITE_UPDATED_RESPONSE_COMPLETE:
+ case STATE_UPDATE_CACHED_RESPONSE_COMPLETE:
+ case STATE_CACHE_WRITE_RESPONSE_COMPLETE:
+ case STATE_TRUNCATE_CACHED_DATA_COMPLETE:
+ case STATE_TRUNCATE_CACHED_METADATA_COMPLETE:
+ case STATE_CACHE_READ_METADATA_COMPLETE:
+ entry_->shared_writers->RemoveValidatingTransaction(this);
+ break;
+ case STATE_SHARED_NETWORK_READ_COMPLETE:
+ case STATE_SHARED_CACHE_WRITE_DATA_COMPLETE:
+ // Current shared network transaction's consumer.
+ entry_->shared_writers->RemoveActiveTransaction(this);
+ break;
+ case STATE_SHARED_NETWORK_READ_WAIT_COMPLETE:
+ // Waiting on Read() operation to be completed by another
+ // transaction.
+ entry_->shared_writers->RemoveWaitingForReadTransaction(this);
+ break;
+ case STATE_CACHE_READ_DATA_COMPLETE:
+ case STATE_NONE:
+ entry_->shared_writers->RemoveIdleTransaction(this);
+ break;
+ default:
+ NOTREACHED() << "bad state";
+ break;
+ }
+
+ DCHECK(!shared_);
+
+ // Since any cleanup needed in entry_ and shared_writers is already done,
+ // set entry_ to null.
+ entry_ = nullptr;
+}
+
int HttpCache::Transaction::WriteMetadata(IOBuffer* buf, int buf_len,
const CompletionCallback& callback) {
DCHECK(buf);
@@ -247,7 +313,7 @@ bool HttpCache::Transaction::AddTruncatedFlag() {
if (partial_ && !truncated_)
return true;
- if (!CanResume(true))
+ if (!cache_->CanResumeEntry(true, request_->method, &response_, entry_))
return false;
// We may have received the whole resource already.
@@ -261,8 +327,9 @@ bool HttpCache::Transaction::AddTruncatedFlag() {
}
LoadState HttpCache::Transaction::GetWriterLoadState() const {
- if (network_trans_.get())
- return network_trans_->GetLoadState();
+ HttpTransaction* network_transaction = GetCurrentNetworkTransaction();
+ if (network_transaction)
+ return network_transaction->GetLoadState();
if (entry_ || !request_)
return LOAD_STATE_IDLE;
return LOAD_STATE_WAITING_FOR_CACHE;
@@ -372,6 +439,15 @@ bool HttpCache::Transaction::IsReadyToRestartForAuth() {
int HttpCache::Transaction::Read(IOBuffer* buf, int buf_len,
const CompletionCallback& callback) {
+ if (next_state_ == STATE_SHARED_READ_WRITE_FAILED) {
+ if (net_log_.IsCapturing())
+ net_log_.EndEventWithNetErrorCode(
+ NetLogEventType::HTTP_CACHE_SHARED_READ_WRITE_FAILED,
+ shared_read_write_failure_result_);
+
+ return shared_read_write_failure_result_;
+ }
+
DCHECK_EQ(next_state_, STATE_NONE);
DCHECK(buf);
DCHECK_GT(buf_len, 0);
@@ -395,13 +471,25 @@ int HttpCache::Transaction::Read(IOBuffer* buf, int buf_len,
reading_ = true;
read_buf_ = buf;
io_buf_len_ = buf_len;
- if (network_trans_) {
- DCHECK(mode_ == WRITE || mode_ == NONE ||
- (mode_ == READ_WRITE && partial_));
- next_state_ = STATE_NETWORK_READ;
- } else {
- DCHECK(mode_ == READ || (mode_ == READ_WRITE && partial_));
- next_state_ = STATE_CACHE_READ_DATA;
+
+ if (shared_) {
+ int disk_entry_size =
+ entry_->disk_entry->GetDataSize(kResponseContentIndex);
+ if (read_offset_ == disk_entry_size) {
+ next_state_ = STATE_SHARED_NETWORK_READ;
+ } else {
+ DCHECK_LT(read_offset_, disk_entry_size);
+ next_state_ = STATE_CACHE_READ_DATA;
+ }
+ } else { // not shared.
+ if (network_trans_) {
+ DCHECK(mode_ == WRITE || mode_ == NONE ||
+ (mode_ == READ_WRITE && partial_));
+ next_state_ = STATE_NETWORK_READ;
+ } else { // read-only
+ DCHECK(mode_ == READ || (mode_ == READ_WRITE && partial_));
+ next_state_ = STATE_CACHE_READ_DATA;
+ }
}
int rv = DoLoop(OK);
@@ -425,40 +513,62 @@ void HttpCache::Transaction::StopCaching() {
// TODO(mmenke): This doesn't release the lock on the cache entry, so a
// future request for the resource will be blocked on this one.
// Fix this.
- if (cache_.get() && entry_ && (mode_ & WRITE) && network_trans_.get() &&
- !is_sparse_ && !range_requested_) {
+ if (shared_) {
+ // This might or might not stop caching based on whether other consumers
+ // exist for this resource or not. If it does, shared_ will be set to false.
+ entry_->shared_writers->StopCaching(this);
+ }
+ if (!shared_ && cache_.get() && entry_ && (mode_ & WRITE) &&
+ network_trans_.get() && !is_sparse_ && !range_requested_) {
mode_ = NONE;
}
}
bool HttpCache::Transaction::GetFullRequestHeaders(
HttpRequestHeaders* headers) const {
- if (network_trans_)
- return network_trans_->GetFullRequestHeaders(headers);
-
+ HttpTransaction* network_transaction = GetCurrentNetworkTransaction();
+ if (network_transaction) {
+ return network_transaction->GetFullRequestHeaders(headers);
+ } else if (have_full_request_headers_) {
+ *headers = full_request_headers_;
+ return true;
+ }
// TODO(juliatuttle): Read headers from cache.
return false;
}
int64_t HttpCache::Transaction::GetTotalReceivedBytes() const {
int64_t total_received_bytes = total_received_bytes_;
- if (network_trans_)
- total_received_bytes += network_trans_->GetTotalReceivedBytes();
+ HttpTransaction* network_transaction = GetCurrentNetworkTransaction();
+ if (network_transaction)
+ total_received_bytes += network_transaction->GetTotalReceivedBytes();
return total_received_bytes;
}
int64_t HttpCache::Transaction::GetTotalSentBytes() const {
int64_t total_sent_bytes = total_sent_bytes_;
- if (network_trans_)
- total_sent_bytes += network_trans_->GetTotalSentBytes();
+ HttpTransaction* network_transaction = GetCurrentNetworkTransaction();
+ if (network_transaction)
+ total_sent_bytes += network_transaction->GetTotalSentBytes();
return total_sent_bytes;
}
void HttpCache::Transaction::DoneReading() {
if (cache_.get() && entry_) {
DCHECK_NE(mode_, UPDATE);
+ bool perform_entry_cleanup = true;
+ if (shared_) {
+ entry_->shared_writers->DoneReading(this);
+
+ // shared_ should have been set to false.
+ DCHECK(!shared_);
+
+ DoneWritingToEntry(true, false);
+ return;
+ }
+
if (mode_ & WRITE) {
- DoneWritingToEntry(true);
+ DoneWritingToEntry(true, perform_entry_cleanup);
} else if (mode_ & READ) {
// It is necessary to check mode_ & READ because it is possible
// for mode_ to be NONE and entry_ non-NULL with a write entry
@@ -497,8 +607,9 @@ void HttpCache::Transaction::SetQuicServerInfo(
bool HttpCache::Transaction::GetLoadTimingInfo(
LoadTimingInfo* load_timing_info) const {
- if (network_trans_)
- return network_trans_->GetLoadTimingInfo(load_timing_info);
+ HttpTransaction* network_transaction = GetCurrentNetworkTransaction();
+ if (network_transaction)
+ return network_transaction->GetLoadTimingInfo(load_timing_info);
if (old_network_trans_load_timing_) {
*load_timing_info = *old_network_trans_load_timing_;
@@ -517,8 +628,9 @@ bool HttpCache::Transaction::GetLoadTimingInfo(
}
bool HttpCache::Transaction::GetRemoteEndpoint(IPEndPoint* endpoint) const {
- if (network_trans_)
- return network_trans_->GetRemoteEndpoint(endpoint);
+ HttpTransaction* network_transaction = GetCurrentNetworkTransaction();
+ if (network_transaction)
+ return network_transaction->GetRemoteEndpoint(endpoint);
if (!old_remote_endpoint_.address().empty()) {
*endpoint = old_remote_endpoint_;
@@ -530,8 +642,9 @@ bool HttpCache::Transaction::GetRemoteEndpoint(IPEndPoint* endpoint) const {
void HttpCache::Transaction::PopulateNetErrorDetails(
NetErrorDetails* details) const {
- if (network_trans_)
- return network_trans_->PopulateNetErrorDetails(details);
+ HttpTransaction* network_transaction = GetCurrentNetworkTransaction();
+ if (network_transaction)
+ network_transaction->PopulateNetErrorDetails(details);
return;
}
@@ -539,6 +652,8 @@ void HttpCache::Transaction::SetPriority(RequestPriority priority) {
priority_ = priority;
if (network_trans_)
network_trans_->SetPriority(priority_);
+ else if (shared_ && entry_ && entry_->shared_writers)
+ entry_->shared_writers->PriorityChanged();
}
void HttpCache::Transaction::SetWebSocketHandshakeStreamCreateHelper(
@@ -569,8 +684,9 @@ int HttpCache::Transaction::ResumeNetworkStart() {
void HttpCache::Transaction::GetConnectionAttempts(
ConnectionAttempts* out) const {
ConnectionAttempts new_connection_attempts;
- if (network_trans_)
- network_trans_->GetConnectionAttempts(&new_connection_attempts);
+ HttpTransaction* network_transaction = GetCurrentNetworkTransaction();
+ if (network_transaction)
+ network_transaction->GetConnectionAttempts(&new_connection_attempts);
out->swap(new_connection_attempts);
out->insert(out->begin(), old_connection_attempts_.begin(),
@@ -588,18 +704,29 @@ void HttpCache::Transaction::GetConnectionAttempts(
// CacheWriteResponse* -> TruncateCachedData* -> TruncateCachedMetadata* ->
// PartialHeadersReceived
//
-// Read():
+// Read(): (For transactions that are not eligible for shared writing)
// NetworkRead* -> CacheWriteData*
//
+// Read(): (For transactions that are eligible for shared writing)
+// SharedNetworkRead* -> SharedCacheWriteData*
+//
+// Read(): (For a transaction that is shared and another read is already in
+// progress)
+// SharedNetworkRead -> SharedNetworkReadWaitComplete
+//
// 2. Cached entry, no validation:
// Start():
// GetBackend* -> InitEntry -> OpenEntry* -> AddToEntry* -> CacheReadResponse*
// -> CacheDispatchValidation -> BeginPartialCacheValidation() ->
// BeginCacheValidation() -> SetupEntryForRead()
//
-// Read():
+// Read(): (When response is already written to the cache.)
// CacheReadData*
//
+// Read(): (When response is currently being written to the cache by shared
+// writing.)
+// SharedNetworkRead* -> SharedCacheWriteData*
+//
// 3. Cached entry, validation (304):
// Start():
// GetBackend* -> InitEntry -> OpenEntry* -> AddToEntry* -> CacheReadResponse*
@@ -609,9 +736,13 @@ void HttpCache::Transaction::GetConnectionAttempts(
// UpdateCachedResponseComplete -> OverwriteCachedResponse ->
// PartialHeadersReceived
//
-// Read():
-// Read(): (When response is already written to the cache.)
// CacheReadData*
//
+// Read(): (When response is currently being written to the cache by shared
+// writing.)
+// SharedNetworkRead* -> SharedCacheWriteData*
+//
// 4. Cached entry, validation and replace (200):
// Start():
// GetBackend* -> InitEntry -> OpenEntry* -> AddToEntry* -> CacheReadResponse*
@@ -873,6 +1004,25 @@ int HttpCache::Transaction::DoLoop(int result) {
case STATE_CACHE_WRITE_TRUNCATED_RESPONSE_COMPLETE:
rv = DoCacheWriteTruncatedResponseComplete(rv);
break;
+ case STATE_SHARED_NETWORK_READ:
+ DCHECK_EQ(OK, rv);
+ rv = DoSharedNetworkRead();
+ break;
+ case STATE_SHARED_NETWORK_READ_COMPLETE:
+ rv = DoSharedNetworkReadComplete(rv);
+ break;
+ case STATE_SHARED_NETWORK_READ_WAIT_COMPLETE:
+ rv = DoSharedNetworkReadWaitComplete(rv);
+ break;
+ case STATE_SHARED_CACHE_WRITE_DATA:
+ rv = DoSharedCacheWriteData(rv);
+ break;
+ case STATE_SHARED_CACHE_WRITE_DATA_COMPLETE:
+ rv = DoSharedCacheWriteDataComplete(rv);
+ break;
+ case STATE_SHARED_READ_WRITE_FAILED:
+ rv = DoSharedReadWriteFailed();
+ break;
default:
NOTREACHED() << "bad state";
rv = ERR_FAILED;
@@ -1510,6 +1660,26 @@ int HttpCache::Transaction::DoSuccessfulSendRequest() {
return OK;
}
+ // If the transaction is shared and it is a 200 or an error response, then
+ // doom the entry and let this transaction continue without writing to the
+ // cache if shared writers contain more transactions. If not, continue
+ // writing to the cache and also transfer the network transaction to shared
+ // writers.
+ if (shared_ && new_response->headers->response_code() != 304) {
+ network_trans_ = entry_->shared_writers->OnValidationNoMatch(
+ cache_key_, this, std::move(network_trans_), priority_);
+ if (!shared_) {
+ DCHECK(network_trans_);
+ if (cache_entry_status_ == CacheEntryStatus::ENTRY_UNDEFINED) {
+ UpdateCacheEntryStatus(CacheEntryStatus::ENTRY_DOOM_SHARED_WRITING);
+ }
+ DoneWritingToEntry(false, false);
+ return OK;
+ } else {
+ DCHECK(!network_trans_);
+ }
+ }
+
// Are we expecting a response to a conditional query?
if (mode_ == READ_WRITE || mode_ == UPDATE) {
if (new_response->headers->response_code() == 304 || handling_206_) {
@@ -1632,7 +1802,8 @@ int HttpCache::Transaction::DoOverwriteCachedResponse() {
return OK;
}
- if (handling_206_ && !CanResume(false)) {
+ if (handling_206_ &&
+ !cache_->CanResumeEntry(false, request_->method, &response_, entry_)) {
// There is no point in storing this resource because it will never be used.
// This may change if we support LOAD_ONLY_FROM_CACHE with sparse entries.
DoneWritingToEntry(false);
@@ -1658,6 +1829,21 @@ int HttpCache::Transaction::DoCacheWriteResponseComplete(int result) {
return OnWriteResponseInfoToEntryComplete(result);
}
+void HttpCache::Transaction::SetShared() {
+ shared_ = true;
+}
+
+void HttpCache::Transaction::ResetShared(bool continue_network_reading,
+ bool continue_cache_reading) {
+ if (!continue_network_reading) {
+ SaveSharedNetworkTransactionInfo();
+ }
+ if (continue_cache_reading) {
+ mode_ = READ;
+ }
+ shared_ = false;
+}
+
int HttpCache::Transaction::DoTruncateCachedData() {
TRACE_EVENT0("io", "HttpCacheTransaction::DoTruncateCachedData");
next_state_ = STATE_TRUNCATE_CACHED_DATA_COMPLETE;
@@ -1706,11 +1892,58 @@ int HttpCache::Transaction::DoTruncateCachedMetadataComplete(int result) {
return OK;
}
+void HttpCache::Transaction::ProcessForSharedWriting() {
+ // Should not be already reading.
+ if (reading_)
+ return;
+
+ // If not already part of SharedWriters, then check if one should be
+ // created.
+ if (!shared_ && !IsEligibleForSharedWriting())
+ return;
+
+ if (shared_) {
+    // Non-304 case is already handled in DoSuccessfulSendRequest.
+ if (response_.headers->response_code() != 304) {
+ return;
+ }
+ UpdateCacheEntryStatus(CacheEntryStatus::ENTRY_JOIN_SHARED_WRITING);
+ entry_->shared_writers->OnValidationMatch(this, priority_);
+ network_trans_.reset();
+ return;
+ }
+
+  // Do not create a SharedWriters if it's a redirect response or if it's a
+  // no-store response.
+ if (response_.headers->response_code() != 200)
+ return;
+
+ if (!entry_)
+ return;
+
+ DCHECK(cache_entry_status_ == CacheEntryStatus::ENTRY_NOT_IN_CACHE ||
+ cache_entry_status_ == CacheEntryStatus::ENTRY_UPDATED ||
+ cache_entry_status_ == CacheEntryStatus::ENTRY_CANT_CONDITIONALIZE ||
+ cache_entry_status_ == CacheEntryStatus::ENTRY_OTHER);
+ // Measure how many requests out of the total of "not cached" and "updated"
+ // are initiating shared writing.
+ initiate_shared_writing_ = true;
+ DCHECK(entry_ && network_trans_);
+
+ // An instance of SharedWriters will be created when the first writer has
+ // written the new response headers in the cache. Transfer network
+ // transaction’s ownership to SharedWriters so it can be used by any of the
+ // transactions for subsequent reading from the network.
+ SharedWriters::Create(this, std::move(network_trans_), cache_, priority_);
+}
+
int HttpCache::Transaction::DoPartialHeadersReceived() {
new_response_ = NULL;
if (entry_ && !partial_ && entry_->disk_entry->GetDataSize(kMetadataIndex))
next_state_ = STATE_CACHE_READ_METADATA;
+ ProcessForSharedWriting();
+
if (!partial_)
return OK;
@@ -1761,6 +1994,10 @@ int HttpCache::Transaction::DoNetworkRead() {
int HttpCache::Transaction::DoNetworkReadComplete(int result) {
TRACE_EVENT0("io", "HttpCacheTransaction::DoNetworkReadComplete");
+ if (net_log_.IsCapturing())
+ net_log_.EndEventWithNetErrorCode(
+ NetLogEventType::HTTP_CACHE_NETWORK_READ_COMPLETE, result);
+
DCHECK(mode_ & WRITE || mode_ == NONE);
if (!cache_.get())
@@ -1775,6 +2012,99 @@ int HttpCache::Transaction::DoNetworkReadComplete(int result) {
return result;
}
+int HttpCache::Transaction::DoSharedNetworkRead() {
+ TRACE_EVENT0("io", "HttpCacheTransaction::DoSharedNetworkRead");
+ if (net_log_.IsCapturing())
+ net_log_.BeginEvent(NetLogEventType::HTTP_CACHE_SHARED_NETWORK_READ);
+
+ next_state_ = STATE_SHARED_NETWORK_READ_COMPLETE;
+ bool shared_read_in_progress = false;
+ int result = entry_->shared_writers->Read(
+ read_buf_, io_buf_len_, io_callback_, this, &shared_read_in_progress);
+ if (shared_read_in_progress) {
+ next_state_ = STATE_SHARED_NETWORK_READ_WAIT_COMPLETE;
+ }
+ return result;
+}
+
+int HttpCache::Transaction::DoSharedNetworkReadComplete(int result) {
+ TRACE_EVENT0("io", "HttpCacheTransaction::DoSharedNetworkReadComplete");
+ if (net_log_.IsCapturing())
+ net_log_.EndEventWithNetErrorCode(
+ NetLogEventType::HTTP_CACHE_SHARED_NETWORK_READ_COMPLETE, result);
+
+ if (result <= 0) {
+ // Set entry as null so that the destructor does not invoke DoneWithEntry
+ // again as entry_ is already cleaned up by SharedWriters.
+ entry_ = nullptr;
+ return result;
+ }
+
+ read_offset_ += result;
+
+ next_state_ = STATE_SHARED_CACHE_WRITE_DATA;
+ return result;
+}
+
+int HttpCache::Transaction::DoSharedCacheWriteData(int result) {
+ TRACE_EVENT0("io", "HttpCacheTransaction::DoSharedCacheWriteData");
+ if (net_log_.IsCapturing())
+ net_log_.BeginEvent(NetLogEventType::HTTP_CACHE_SHARED_CACHE_WRITE_DATA);
+
+ next_state_ = STATE_SHARED_CACHE_WRITE_DATA_COMPLETE;
+ write_len_ = result;
+ return entry_->shared_writers->CacheWrite(read_buf_, write_len_, io_callback_,
+ this);
+}
+
+int HttpCache::Transaction::DoSharedCacheWriteDataComplete(int result) {
+ TRACE_EVENT0("io", "HttpCacheTransaction::DoSharedCacheWriteDataComplete");
+ if (net_log_.IsCapturing())
+ net_log_.EndEventWithNetErrorCode(
+ NetLogEventType::HTTP_CACHE_SHARED_CACHE_WRITE_DATA_COMPLETE, result);
+
+ OnCacheWriteDataComplete(true, &result);
+
+ if (result == 0) {
+ DoneWritingToEntry(true, false);
+ }
+ return result;
+}
+
+void HttpCache::Transaction::OnCacheWriteDataComplete(bool was_shared,
+ int* result) {
+ if (*result != write_len_) {
+ DoneWritingToEntry(false, !was_shared);
+ // We want to ignore errors writing to disk and just keep reading from
+ // the network.
+ *result = write_len_;
+ } else if (!done_reading_ && entry_ && (!partial_ || truncated_)) {
+ done_reading_ = cache_->IsResponseCompleted(entry_, &response_);
+ }
+}
+
+int HttpCache::Transaction::DoSharedNetworkReadWaitComplete(int result) {
+ TRACE_EVENT0("io", "HttpCacheTransaction::DoSharedNetworkReadWaitComplete");
+ if (net_log_.IsCapturing())
+ net_log_.EndEventWithNetErrorCode(
+ NetLogEventType::HTTP_CACHE_SHARED_NETWORK_READ_WAIT_COMPLETE, result);
+
+  // If it's a network read failure or cache write failure, we just return the
+  // result. If it's a cache write success, read_buf_ would have been filled
+ // with the read data by SharedWriters.
+ if (result == 0) { // response is complete.
+ DoneWritingToEntry(true, false); // changes the mode_ to NONE.
+ } else if (result > 0) {
+ read_offset_ += result;
+ } else {
+ // Set entry as null so that the destructor does not invoke DoneWithEntry
+ // again as entry_ is already cleaned up by SharedWriters.
+ entry_ = nullptr;
+ }
+
+ return result;
+}
+
int HttpCache::Transaction::DoCacheReadData() {
TRACE_EVENT0("io", "HttpCacheTransaction::DoCacheReadData");
if (request_->method == "HEAD")
@@ -1836,9 +2166,15 @@ int HttpCache::Transaction::DoCacheWriteData(int num_bytes) {
if (!entry_ || !num_bytes)
return num_bytes;
+ if (partial_) {
+ return partial_->CacheWrite(entry_->disk_entry, read_buf_.get(), num_bytes,
+ io_callback_);
+ }
+
int current_size = entry_->disk_entry->GetDataSize(kResponseContentIndex);
- return WriteToEntry(kResponseContentIndex, current_size, read_buf_.get(),
- num_bytes, io_callback_);
+ return entry_->disk_entry->WriteData(kResponseContentIndex, current_size,
+ read_buf_.get(), num_bytes, io_callback_,
+ true);
}
int HttpCache::Transaction::DoCacheWriteDataComplete(int result) {
@@ -1852,19 +2188,7 @@ int HttpCache::Transaction::DoCacheWriteDataComplete(int result) {
if (!cache_.get())
return ERR_UNEXPECTED;
- if (result != write_len_) {
- DLOG(ERROR) << "failed to write response data to cache";
- DoneWritingToEntry(false);
-
- // We want to ignore errors writing to disk and just keep reading from
- // the network.
- result = write_len_;
- } else if (!done_reading_ && entry_ && (!partial_ || truncated_)) {
- int current_size = entry_->disk_entry->GetDataSize(kResponseContentIndex);
- int64_t body_size = response_.headers->GetContentLength();
- if (body_size >= 0 && body_size <= current_size)
- done_reading_ = true;
- }
+ OnCacheWriteDataComplete(false, &result);
if (partial_) {
// This may be the last request.
@@ -1874,13 +2198,12 @@ int HttpCache::Transaction::DoCacheWriteDataComplete(int result) {
}
}
+ // End of file. This may be the result of a connection problem so see if we
+ // have to keep the entry around to be flagged as truncated later on.
if (result == 0) {
- // End of file. This may be the result of a connection problem so see if we
- // have to keep the entry around to be flagged as truncated later on.
if (done_reading_ || !entry_ || partial_ ||
- response_.headers->GetContentLength() <= 0) {
+ response_.headers->GetContentLength() <= 0)
DoneWritingToEntry(true);
- }
}
return result;
@@ -1898,6 +2221,15 @@ int HttpCache::Transaction::DoCacheWriteTruncatedResponseComplete(int result) {
return OnWriteResponseInfoToEntryComplete(result);
}
+int HttpCache::Transaction::DoSharedReadWriteFailed() {
+ if (net_log_.IsCapturing())
+ net_log_.EndEventWithNetErrorCode(
+ NetLogEventType::HTTP_CACHE_SHARED_READ_WRITE_FAILED,
+ shared_read_write_failure_result_);
+
+ return shared_read_write_failure_result_;
+}
+
//-----------------------------------------------------------------------------
void HttpCache::Transaction::SetRequest(const NetLogWithSource& net_log,
@@ -2091,8 +2423,13 @@ int HttpCache::Transaction::BeginCacheValidation() {
}
if (skip_validation) {
- UpdateCacheEntryStatus(CacheEntryStatus::ENTRY_USED);
- return SetupEntryForRead();
+ if (shared_) {
+ UpdateCacheEntryStatus(CacheEntryStatus::ENTRY_JOIN_SHARED_WRITING);
+ entry_->shared_writers->OnValidationMatch(this, priority_);
+ } else {
+ UpdateCacheEntryStatus(CacheEntryStatus::ENTRY_USED);
+ return SetupEntryForRead();
+ }
} else {
// Make the network request conditional, to see if we may reuse our cached
// response. If we cannot do so, then we just resort to a normal fetch.
@@ -2611,15 +2948,8 @@ int HttpCache::Transaction::WriteResponseInfoToEntry(bool truncated) {
if (truncated)
DCHECK_EQ(200, response_.headers->response_code());
- // When writing headers, we normally only write the non-transient headers.
- bool skip_transient_headers = true;
- scoped_refptr<PickledIOBuffer> data(new PickledIOBuffer());
- response_.Persist(data->pickle(), skip_transient_headers, truncated);
- data->Done();
-
- io_buf_len_ = data->pickle()->size();
- return entry_->disk_entry->WriteData(kResponseInfoIndex, 0, data.get(),
- io_buf_len_, io_callback_, true);
+ return cache_->WriteResponseInfo(entry_, &response_, io_callback_, truncated,
+ &io_buf_len_);
}
int HttpCache::Transaction::OnWriteResponseInfoToEntryComplete(int result) {
@@ -2637,17 +2967,30 @@ int HttpCache::Transaction::OnWriteResponseInfoToEntryComplete(int result) {
return OK;
}
-void HttpCache::Transaction::DoneWritingToEntry(bool success) {
+void HttpCache::Transaction::DoneWritingToEntry(bool success,
+ bool perform_entry_cleanup) {
if (!entry_)
return;
RecordHistograms();
- cache_->DoneWritingToEntry(entry_, success);
- entry_ = NULL;
+ if (perform_entry_cleanup) {
+ cache_->DoneWritingToEntry(entry_, success);
+ }
+ entry_ = nullptr;
mode_ = NONE; // switch to 'pass through' mode
}
+void HttpCache::Transaction::ContinueWithoutSharedWriting(
+ std::unique_ptr<HttpTransaction> network_transaction,
+ bool needs_entry) {
+ shared_ = false;
+ if (!needs_entry)
+ entry_ = nullptr;
+ mode_ = NONE;
+ network_trans_ = std::move(network_transaction);
+}
+
int HttpCache::Transaction::OnCacheReadError(int result, bool restart) {
DLOG(ERROR) << "ReadData failed: " << result;
const int result_for_histogram = std::max(0, -result);
@@ -2666,7 +3009,11 @@ int HttpCache::Transaction::OnCacheReadError(int result, bool restart) {
if (restart) {
DCHECK(!reading_);
DCHECK(!network_trans_.get());
- cache_->DoneWithEntry(entry_, this, false);
+ if (shared_) {
+ entry_->shared_writers->RemoveValidatingTransaction(this);
+ } else {
+ cache_->DoneWithEntry(entry_, this, false);
+ }
entry_ = NULL;
is_sparse_ = false;
partial_.reset();
@@ -2691,6 +3038,8 @@ void HttpCache::Transaction::OnAddToEntryTimeout(base::TimeTicks start_time) {
}
void HttpCache::Transaction::DoomPartialEntry(bool delete_object) {
+ // Partial requests not supported with shared writing as of now.
+ DCHECK(!shared_);
DVLOG(2) << "DoomPartialEntry";
int rv = cache_->DoomEntry(cache_key_, NULL);
DCHECK_EQ(OK, rv);
@@ -2751,53 +3100,56 @@ void HttpCache::Transaction::ResetPartialState(bool delete_object) {
}
}
-void HttpCache::Transaction::ResetNetworkTransaction() {
+void HttpCache::Transaction::SaveNetworkTransactionInfo(
+ const HttpTransaction* transaction) {
DCHECK(!old_network_trans_load_timing_);
- DCHECK(network_trans_);
+ DCHECK(transaction);
LoadTimingInfo load_timing;
- if (network_trans_->GetLoadTimingInfo(&load_timing))
+ if (transaction->GetLoadTimingInfo(&load_timing))
old_network_trans_load_timing_.reset(new LoadTimingInfo(load_timing));
- total_received_bytes_ += network_trans_->GetTotalReceivedBytes();
- total_sent_bytes_ += network_trans_->GetTotalSentBytes();
+ total_received_bytes_ += transaction->GetTotalReceivedBytes();
+ total_sent_bytes_ += transaction->GetTotalSentBytes();
ConnectionAttempts attempts;
- network_trans_->GetConnectionAttempts(&attempts);
+ transaction->GetConnectionAttempts(&attempts);
for (const auto& attempt : attempts)
old_connection_attempts_.push_back(attempt);
old_remote_endpoint_ = IPEndPoint();
- network_trans_->GetRemoteEndpoint(&old_remote_endpoint_);
- network_trans_.reset();
+ transaction->GetRemoteEndpoint(&old_remote_endpoint_);
}
-// Histogram data from the end of 2010 show the following distribution of
-// response headers:
-//
-// Content-Length............... 87%
-// Date......................... 98%
-// Last-Modified................ 49%
-// Etag......................... 19%
-// Accept-Ranges: bytes......... 25%
-// Accept-Ranges: none.......... 0.4%
-// Strong Validator............. 50%
-// Strong Validator + ranges.... 24%
-// Strong Validator + CL........ 49%
-//
-bool HttpCache::Transaction::CanResume(bool has_data) {
- // Double check that there is something worth keeping.
- if (has_data && !entry_->disk_entry->GetDataSize(kResponseContentIndex))
- return false;
+void HttpCache::Transaction::SaveSharedNetworkTransactionInfo() {
+ // If network_trans_ is still present, this transaction has not started using
+ // the "shared" network transaction.
+ if (network_trans_) {
+ SaveNetworkTransactionInfo(network_trans_.get());
+ return;
+ }
- if (request_->method != "GET")
- return false;
+ DCHECK(!old_network_trans_load_timing_);
- // Note that if this is a 206, content-length was already fixed after calling
- // PartialData::ResponseHeadersOK().
- if (response_.headers->GetContentLength() <= 0 ||
- response_.headers->HasHeaderValue("Accept-Ranges", "none") ||
- !response_.headers->HasStrongValidators()) {
- return false;
- }
+ // If the transaction is being deleted while still in the waiting queue,
+ // entry_ is not yet set, do nothing.
+ if (!entry_)
+ return;
+ DCHECK(entry_->shared_writers);
+ HttpTransaction* network_transaction =
+ entry_->shared_writers->network_transaction();
+ SaveNetworkTransactionInfo(network_transaction);
+ have_full_request_headers_ =
+ network_transaction->GetFullRequestHeaders(&full_request_headers_);
+}
- return true;
+void HttpCache::Transaction::ResetNetworkTransaction() {
+ SaveNetworkTransactionInfo(network_trans_.get());
+ network_trans_.reset();
+}
+
+HttpTransaction* HttpCache::Transaction::GetCurrentNetworkTransaction() const {
+ if (network_trans_)
+ return network_trans_.get();
+ if (shared_ && entry_ && entry_->shared_writers)
+ return entry_->shared_writers->network_transaction();
+ return nullptr;
}
void HttpCache::Transaction::SetResponse(const HttpResponseInfo& response) {
@@ -2923,8 +3275,10 @@ void HttpCache::Transaction::RecordHistograms() {
validation_cause_, VALIDATION_CAUSE_MAX);
}
+ // This may also exclude shared writing counts for these cases.
if (cache_entry_status_ == CacheEntryStatus::ENTRY_OTHER)
return;
+
DCHECK(!range_requested_);
DCHECK(!first_cache_access_since_.is_null());
@@ -2932,19 +3286,33 @@ void HttpCache::Transaction::RecordHistograms() {
UMA_HISTOGRAM_TIMES("HttpCache.AccessToDone", total_time);
+ if (cache_entry_status_ == CacheEntryStatus::ENTRY_JOIN_SHARED_WRITING) {
+ UMA_HISTOGRAM_TIMES("HttpCache.AccessToDone.JoinSharedWriting", total_time);
+ } else if (cache_entry_status_ ==
+ CacheEntryStatus::ENTRY_DOOM_SHARED_WRITING) {
+ UMA_HISTOGRAM_TIMES("HttpCache.AccessToDone.DoomSharedWriting", total_time);
+ } else if (initiate_shared_writing_) {
+ UMA_HISTOGRAM_TIMES("HttpCache.AccessToDone.InitiateSharedWriting",
+ total_time);
+ }
+
bool did_send_request = !send_request_since_.is_null();
DCHECK(
(did_send_request &&
(cache_entry_status_ == CacheEntryStatus::ENTRY_NOT_IN_CACHE ||
cache_entry_status_ == CacheEntryStatus::ENTRY_VALIDATED ||
cache_entry_status_ == CacheEntryStatus::ENTRY_UPDATED ||
- cache_entry_status_ == CacheEntryStatus::ENTRY_CANT_CONDITIONALIZE)) ||
+ cache_entry_status_ == CacheEntryStatus::ENTRY_CANT_CONDITIONALIZE ||
+ cache_entry_status_ == CacheEntryStatus::ENTRY_JOIN_SHARED_WRITING ||
+ cache_entry_status_ == CacheEntryStatus::ENTRY_DOOM_SHARED_WRITING)) ||
(!did_send_request &&
- cache_entry_status_ == CacheEntryStatus::ENTRY_USED));
+ (cache_entry_status_ == CacheEntryStatus::ENTRY_USED ||
+ cache_entry_status_ == CacheEntryStatus::ENTRY_JOIN_SHARED_WRITING)));
if (!did_send_request) {
- DCHECK(cache_entry_status_ == CacheEntryStatus::ENTRY_USED);
- UMA_HISTOGRAM_TIMES("HttpCache.AccessToDone.Used", total_time);
+ if (cache_entry_status_ == CacheEntryStatus::ENTRY_USED) {
+ UMA_HISTOGRAM_TIMES("HttpCache.AccessToDone.Used", total_time);
+ }
return;
}
@@ -2969,12 +3337,31 @@ void HttpCache::Transaction::RecordHistograms() {
before_send_time);
UMA_HISTOGRAM_PERCENTAGE("HttpCache.PercentBeforeSend.CantConditionalize",
before_send_sample);
+ if (initiate_shared_writing_) {
+ UMA_HISTOGRAM_TIMES(
+ "HttpCache.BeforeSend.CantConditionalize.InitiateSharedWriting",
+ before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE(
+ "HttpCache.PercentBeforeSend.CantConditionalize."
+ "InitiateSharedWriting",
+ before_send_sample);
+ }
+
break;
}
case CacheEntryStatus::ENTRY_NOT_IN_CACHE: {
UMA_HISTOGRAM_TIMES("HttpCache.BeforeSend.NotCached", before_send_time);
UMA_HISTOGRAM_PERCENTAGE("HttpCache.PercentBeforeSend.NotCached",
before_send_sample);
+ if (initiate_shared_writing_) {
+ UMA_HISTOGRAM_TIMES(
+ "HttpCache.BeforeSend.NotCached.InitiateSharedWriting",
+ before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE(
+ "HttpCache.PercentBeforeSend.NotCached.InitiateSharedWriting",
+ before_send_sample);
+ }
+
break;
}
case CacheEntryStatus::ENTRY_VALIDATED: {
@@ -2987,6 +3374,28 @@ void HttpCache::Transaction::RecordHistograms() {
UMA_HISTOGRAM_TIMES("HttpCache.BeforeSend.Updated", before_send_time);
UMA_HISTOGRAM_PERCENTAGE("HttpCache.PercentBeforeSend.Updated",
before_send_sample);
+ if (initiate_shared_writing_) {
+ UMA_HISTOGRAM_TIMES(
+ "HttpCache.BeforeSend.Updated.InitiateSharedWriting",
+ before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE(
+ "HttpCache.PercentBeforeSend.Updated.InitiateSharedWriting",
+ before_send_sample);
+ }
+ break;
+ }
+ case CacheEntryStatus::ENTRY_JOIN_SHARED_WRITING: {
+ UMA_HISTOGRAM_TIMES("HttpCache.BeforeSend.JoinSharedWriting",
+ before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE("HttpCache.PercentBeforeSend.JoinSharedWriting",
+ before_send_sample);
+ break;
+ }
+ case CacheEntryStatus::ENTRY_DOOM_SHARED_WRITING: {
+ UMA_HISTOGRAM_TIMES("HttpCache.BeforeSend.DoomSharedWriting",
+ before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE("HttpCache.PercentBeforeSend.DoomSharedWriting",
+ before_send_sample);
break;
}
default:
@@ -2994,6 +3403,29 @@ void HttpCache::Transaction::RecordHistograms() {
}
}
+bool HttpCache::Transaction::IsEligibleForSharedWriting() const {
+ if (!(mode_ & WRITE))
+ return false;
+
+ DCHECK(request_);
+ if (request_->method != "GET")
+ return false;
+
+ if (partial_ || range_requested_)
+ return false;
+
+ if (truncated_)
+ return false;
+
+ return true;
+}
+
+void HttpCache::Transaction::SetSharedWritingFailState(int result) {
+ next_state_ = STATE_SHARED_READ_WRITE_FAILED;
+ shared_read_write_failure_result_ = result;
+ entry_ = nullptr;
+}
+
void HttpCache::Transaction::OnIOComplete(int result) {
DoLoop(result);
}
« no previous file with comments | « net/http/http_cache_transaction.h ('k') | net/http/http_cache_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698