Chromium Code Reviews

Unified Diff: net/http/http_cache_transaction.cc

Issue 10808047: Add HttpCache histograms focussed on blocking and transaction type. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: remediate to review (Created 8 years, 5 months ago)
Index: net/http/http_cache_transaction.cc
diff --git a/net/http/http_cache_transaction.cc b/net/http/http_cache_transaction.cc
index 652a3b26b6d136d0a7fa3d8926deb9841f940582..423826f447904783b9a20e1b34bb03496cd3a879 100644
--- a/net/http/http_cache_transaction.cc
+++ b/net/http/http_cache_transaction.cc
@@ -39,6 +39,16 @@ using base::Time;
using base::TimeDelta;
using base::TimeTicks;
+namespace {
+
+// The cutoff for tagging small transactions in histograms; this size was
+// chosen to cover resources likely to be received in a single TCP window.
+// With an initial CWND of 10 and an MTU of 1500 bytes, after TCP and HTTP
+// framing overhead a resource of this size is relatively likely to take
+// only one RTT.
+const int kSmallResourceMaxBytes = 14 * 1024;
+
+} // namespace
+
namespace net {
struct HeaderNameAndValue {
@@ -126,7 +136,10 @@ HttpCache::Transaction::Transaction(HttpCache* cache)
ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)),
ALLOW_THIS_IN_INITIALIZER_LIST(io_callback_(
base::Bind(&Transaction::OnIOComplete,
- weak_factory_.GetWeakPtr()))) {
+ weak_factory_.GetWeakPtr()))),
+ transaction_pattern_(PATTERN_UNDEFINED),
+ bytes_read_from_cache_(0),
+ bytes_read_from_network_(0) {
COMPILE_ASSERT(HttpCache::Transaction::kNumValidationHeaders ==
arraysize(kValidationHeaders),
Invalid_number_of_validation_headers);
@@ -718,6 +731,8 @@ int HttpCache::Transaction::DoSendRequest() {
DCHECK(mode_ & WRITE || mode_ == NONE);
DCHECK(!network_trans_.get());
+ send_request_since_ = TimeTicks::Now();
+
// Create a network transaction.
int rv = cache_->network_layer_->CreateTransaction(&network_trans_);
if (rv != OK)
@@ -737,6 +752,8 @@ int HttpCache::Transaction::DoSendRequestComplete(int result) {
return OK;
}
+ // Do not record requests that have network errors or restarts.
+ UpdateTransactionPattern(PATTERN_NOT_COVERED);
if (IsCertificateError(result)) {
const HttpResponseInfo* response = network_trans_->GetResponseInfo();
// If we get a certificate error, then there is a certificate in ssl_info,
@@ -767,24 +784,27 @@ int HttpCache::Transaction::DoSuccessfulSendRequest() {
// If we have an authentication response, we are exposed to weird things
// happening if the user cancels the authentication before we receive
// the new response.
+ UpdateTransactionPattern(PATTERN_NOT_COVERED);
response_ = HttpResponseInfo();
network_trans_.reset();
new_response_ = NULL;
next_state_ = STATE_SEND_REQUEST;
return OK;
}
+ bytes_read_from_network_ += new_response_->headers->raw_headers().size();
if (handling_206_ && mode_ == READ_WRITE && !truncated_ && !is_sparse_) {
// We have stored the full entry, but it changed and the server is
// sending a range. We have to delete the old entry.
DoneWritingToEntry(false);
}
-
if (new_response_->headers->response_code() == 416) {
DCHECK_EQ(NONE, mode_);
response_ = *new_response_;
return OK;
}
+ DCHECK(mode_ != WRITE || transaction_pattern_ == PATTERN_ENTRY_NOT_CACHED ||
+ transaction_pattern_ == PATTERN_NOT_COVERED);
rvargas (doing something else) 2012/07/31 17:43:57 What I really don't like about not handling WRITE
gavinp 2012/07/31 20:16:31 You're right about the state machine. My new uploa
if (mode_ == WRITE &&
(request_->method == "PUT" || request_->method == "DELETE")) {
if (new_response->headers->response_code() == 200) {
@@ -797,9 +817,11 @@ int HttpCache::Transaction::DoSuccessfulSendRequest() {
// Are we expecting a response to a conditional query?
if (mode_ == READ_WRITE || mode_ == UPDATE) {
if (new_response->headers->response_code() == 304 || handling_206_) {
+ UpdateTransactionPattern(PATTERN_ENTRY_VALIDATED);
next_state_ = STATE_UPDATE_CACHED_RESPONSE;
return OK;
}
+ UpdateTransactionPattern(PATTERN_ENTRY_UPDATED);
mode_ = WRITE;
}
@@ -818,6 +840,9 @@ int HttpCache::Transaction::DoNetworkReadComplete(int result) {
if (!cache_)
return ERR_UNEXPECTED;
+ if (result > 0)
+ bytes_read_from_network_ += result;
+
// If there is an error or we aren't saving the data, we are done; just wait
// until the destructor runs to see if we can keep the data.
if (mode_ == NONE || result < 0)
@@ -847,6 +872,7 @@ int HttpCache::Transaction::DoOpenEntry() {
next_state_ = STATE_OPEN_ENTRY_COMPLETE;
cache_pending_ = true;
net_log_.BeginEvent(NetLog::TYPE_HTTP_CACHE_OPEN_ENTRY);
+ open_entry_since_ = TimeTicks::Now();
return cache_->OpenEntry(cache_key_, &new_entry_, this);
}
@@ -866,6 +892,7 @@ int HttpCache::Transaction::DoOpenEntryComplete(int result) {
return OK;
}
+ UpdateTransactionPattern(PATTERN_ENTRY_NOT_CACHED);
if (request_->method == "PUT" || request_->method == "DELETE") {
DCHECK(mode_ == READ_WRITE || mode_ == WRITE);
mode_ = NONE;
@@ -931,6 +958,7 @@ int HttpCache::Transaction::DoCreateEntryComplete(int result) {
int HttpCache::Transaction::DoDoomEntry() {
next_state_ = STATE_DOOM_ENTRY_COMPLETE;
cache_pending_ = true;
+ doom_entry_since_ = TimeTicks::Now();
net_log_.BeginEvent(NetLog::TYPE_HTTP_CACHE_DOOM_ENTRY);
return cache_->DoomEntry(cache_key_, this);
}
@@ -941,7 +969,8 @@ int HttpCache::Transaction::DoDoomEntryComplete(int result) {
cache_pending_ = false;
if (result == ERR_CACHE_RACE)
next_state_ = STATE_INIT_ENTRY;
-
+ else
+ UpdateTransactionPattern(PATTERN_ENTRY_NOT_CACHED);
return OK;
}
@@ -1218,6 +1247,7 @@ int HttpCache::Transaction::DoCacheReadResponseComplete(int result) {
&response_, &truncated_)) {
return OnCacheReadError(result, true);
}
+ bytes_read_from_cache_ += result;
// Some resources may have slipped in as truncated when they're not.
int current_size = entry_->disk_entry->GetDataSize(kResponseContentIndex);
@@ -1239,6 +1269,7 @@ int HttpCache::Transaction::DoCacheReadResponseComplete(int result) {
//
switch (mode_) {
case READ:
+ UpdateTransactionPattern(PATTERN_ENTRY_USED);
result = BeginCacheRead();
break;
case READ_WRITE:
@@ -1303,7 +1334,6 @@ int HttpCache::Transaction::DoCacheReadMetadataComplete(int result) {
net_log_.EndEventWithNetErrorCode(NetLog::TYPE_HTTP_CACHE_READ_INFO, result);
if (result != response_.metadata->size())
return OnCacheReadError(result, false);
-
return OK;
}
@@ -1346,12 +1376,18 @@ int HttpCache::Transaction::DoCacheReadDataComplete(int result) {
if (!cache_)
return ERR_UNEXPECTED;
- if (partial_.get())
+ if (partial_.get()) {
+ // Partial requests are confusing to report in histograms because they may
+ // have multiple underlying requests.
+ UpdateTransactionPattern(PATTERN_NOT_COVERED);
return DoPartialCacheReadCompleted(result);
+ }
if (result > 0) {
read_offset_ += result;
+ bytes_read_from_cache_ += result;
} else if (result == 0) { // End of file.
+ RecordHistograms();
cache_->DoneReadingFromEntry(entry_, this);
entry_ = NULL;
} else {
@@ -1499,6 +1535,7 @@ void HttpCache::Transaction::SetRequest(const BoundNetLog& net_log,
}
if (range_found && !(effective_load_flags_ & LOAD_DISABLE_CACHE)) {
+ UpdateTransactionPattern(PATTERN_NOT_COVERED);
partial_.reset(new PartialData);
if (request_->method == "GET" && partial_->Init(request_->extra_headers)) {
// We will be modifying the actual range requested to the server, so
@@ -1570,13 +1607,18 @@ int HttpCache::Transaction::BeginCacheValidation() {
bool skip_validation = effective_load_flags_ & LOAD_PREFERRING_CACHE ||
!RequiresValidation();
- if (truncated_)
+ if (truncated_) {
+ // Truncated entries can cause partial gets, so we shouldn't record this
+ // load in histograms.
+ UpdateTransactionPattern(PATTERN_NOT_COVERED);
skip_validation = !partial_->initial_validation();
+ }
if ((partial_.get() && !partial_->IsCurrentRangeCached()) || invalid_range_)
skip_validation = false;
if (skip_validation) {
+ UpdateTransactionPattern(PATTERN_ENTRY_USED);
if (partial_.get()) {
// We are going to return the saved response headers to the caller, so
// we may need to adjust them first.
@@ -1594,6 +1636,7 @@ int HttpCache::Transaction::BeginCacheValidation() {
// Our mode remains READ_WRITE for a conditional request. We'll switch to
// either READ or WRITE mode once we hear back from the server.
if (!ConditionalizeRequest()) {
+ UpdateTransactionPattern(PATTERN_NOT_COVERED);
DCHECK(!partial_.get());
DCHECK_NE(206, response_.headers->response_code());
mode_ = WRITE;
@@ -1610,6 +1653,8 @@ int HttpCache::Transaction::BeginPartialCacheValidation() {
!truncated_)
return BeginCacheValidation();
+ // Partial requests should not be recorded in histograms.
+ UpdateTransactionPattern(PATTERN_NOT_COVERED);
if (range_requested_) {
next_state_ = STATE_CACHE_QUERY_DATA;
return OK;
@@ -1670,6 +1715,7 @@ int HttpCache::Transaction::BeginExternallyConditionalizedRequest() {
validator.empty() || validator != external_validation_.values[i]) {
// The externally conditionalized request is not a validation request
// for our existing cache entry. Proceed with caching disabled.
+ UpdateTransactionPattern(PATTERN_NOT_COVERED);
DoneWritingToEntry(true);
}
}
@@ -1893,6 +1939,7 @@ bool HttpCache::Transaction::ValidatePartialResponse() {
if (failure) {
// We cannot truncate this entry, it has to be deleted.
+ UpdateTransactionPattern(PATTERN_NOT_COVERED);
DoomPartialEntry(false);
mode_ = NONE;
if (!reading_ && !partial_->IsLastRange()) {
@@ -2015,9 +2062,7 @@ void HttpCache::Transaction::DoneWritingToEntry(bool success) {
if (!entry_)
return;
- if (cache_->mode() == RECORD)
- DVLOG(1) << "Recorded: " << request_->method << request_->url
- << " status: " << response_.headers->response_code();
+ RecordHistograms();
cache_->DoneWritingToEntry(entry_, success);
entry_ = NULL;
@@ -2112,4 +2157,128 @@ void HttpCache::Transaction::OnIOComplete(int result) {
DoLoop(result);
}
+void HttpCache::Transaction::UpdateTransactionPattern(
+ TransactionPattern new_transaction_pattern) {
+ if (transaction_pattern_ == PATTERN_NOT_COVERED)
+ return;
+ DCHECK(transaction_pattern_ == PATTERN_UNDEFINED ||
+ new_transaction_pattern == PATTERN_NOT_COVERED);
+ transaction_pattern_ = new_transaction_pattern;
+}
+
+void HttpCache::Transaction::RecordHistograms() {
+ DCHECK_NE(PATTERN_UNDEFINED, transaction_pattern_);
+ if (!cache_ || !cache_->GetCurrentBackend() ||
+ cache_->GetCurrentBackend()->GetCacheType() != DISK_CACHE ||
+ cache_->mode() != NORMAL || request_->method != "GET") {
+ return;
+ }
+ UMA_HISTOGRAM_BOOLEAN("HttpCache.HasPattern",
+ transaction_pattern_ != PATTERN_NOT_COVERED);
+ if (transaction_pattern_ == PATTERN_NOT_COVERED)
+ return;
+ DCHECK(!range_requested_);
+ DCHECK(!open_entry_since_.is_null() || !doom_entry_since_.is_null());
+
+ TimeTicks start_time = !open_entry_since_.is_null() ? open_entry_since_ :
+ doom_entry_since_;
+ TimeDelta total_time = base::TimeTicks::Now() - start_time;
+
+ UMA_HISTOGRAM_TIMES("HttpCache.TransactionTime", total_time);
+
+ bool did_send_request = !send_request_since_.is_null();
+ DCHECK(
+ (did_send_request && (transaction_pattern_ == PATTERN_ENTRY_NOT_CACHED ||
+ transaction_pattern_ == PATTERN_ENTRY_VALIDATED ||
+ transaction_pattern_ == PATTERN_ENTRY_UPDATED)) ||
+ (!did_send_request && transaction_pattern_ == PATTERN_ENTRY_USED));
+
+ int resource_size;
+ if (transaction_pattern_ == PATTERN_ENTRY_NOT_CACHED ||
+ transaction_pattern_ == PATTERN_ENTRY_UPDATED) {
+ resource_size = bytes_read_from_network_;
+ } else {
+ DCHECK(transaction_pattern_ == PATTERN_ENTRY_VALIDATED ||
+ transaction_pattern_ == PATTERN_ENTRY_USED);
+ resource_size = bytes_read_from_cache_;
+ }
+
+ bool is_small_resource = resource_size < kSmallResourceMaxBytes;
+ if (is_small_resource)
+ UMA_HISTOGRAM_TIMES("HttpCache.TransactionTime.SmallResource", total_time);
+
+ if (!did_send_request) {
+ DCHECK(transaction_pattern_ == PATTERN_ENTRY_USED);
+ UMA_HISTOGRAM_TIMES("HttpCache.TransactionTime.Used", total_time);
+ if (is_small_resource) {
+ UMA_HISTOGRAM_TIMES(
+ "HttpCache.TransactionTime.Used.SmallResource", total_time);
+ }
+ return;
+ }
+
+ TimeDelta before_send_time = send_request_since_ - start_time;
+ int before_send_percent = before_send_time * 100 / total_time;
+ DCHECK_LE(0, before_send_percent);
+ DCHECK_GE(100, before_send_percent);
+
+ UMA_HISTOGRAM_TIMES("HttpCache.TransactionTime.SentRequest", total_time);
+ UMA_HISTOGRAM_TIMES("HttpCache.BeforeSend", before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE("HttpCache.PercentBeforeSend", before_send_percent);
+ if (is_small_resource) {
+ UMA_HISTOGRAM_TIMES("HttpCache.TransactionTime.SentRequest.SmallResource",
+ total_time);
+ UMA_HISTOGRAM_TIMES("HttpCache.BeforeSend.SmallResource",
+ before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE("HttpCache.PercentBeforeSend.SmallResource",
+ before_send_percent);
+ }
+
+ // TODO(gavinp): Remove or minimize these histograms, particularly the ones
+ // below this comment, once we have received initial data.
+ switch (transaction_pattern_) {
+ case PATTERN_ENTRY_NOT_CACHED: {
+ UMA_HISTOGRAM_TIMES("HttpCache.BeforeSend.NotCached", before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE(
+ "HttpCache.PercentBeforeSend.NotCached", before_send_percent);
+ if (is_small_resource) {
+ UMA_HISTOGRAM_TIMES(
+ "HttpCache.BeforeSend.NotCached.SmallResource", before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE(
+ "HttpCache.PercentBeforeSend.NotCached.SmallResource",
+ before_send_percent);
+ }
+ break;
+ }
+ case PATTERN_ENTRY_VALIDATED: {
+ UMA_HISTOGRAM_TIMES("HttpCache.BeforeSend.Validated", before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE(
+ "HttpCache.PercentBeforeSend.Validated", before_send_percent);
+ if (is_small_resource) {
+ UMA_HISTOGRAM_TIMES(
+ "HttpCache.BeforeSend.Validated.SmallResource", before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE(
+ "HttpCache.PercentBeforeSend.Validated.SmallResource",
+ before_send_percent);
+ }
+ break;
+ }
+ case PATTERN_ENTRY_UPDATED: {
+ UMA_HISTOGRAM_TIMES("HttpCache.BeforeSend.Updated", before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE(
+ "HttpCache.PercentBeforeSend.Updated", before_send_percent);
+ if (is_small_resource) {
+ UMA_HISTOGRAM_TIMES(
+ "HttpCache.BeforeSend.Updated.SmallResource", before_send_time);
+ UMA_HISTOGRAM_PERCENTAGE(
+ "HttpCache.PercentBeforeSend.Updated.SmallResource",
+ before_send_percent);
+ }
+ break;
+ }
+ default:
+ NOTREACHED();
+ }
+}
+
} // namespace net
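
The kSmallResourceMaxBytes constant added in the first hunk encodes a back-of-the-envelope calculation: with an initial congestion window of 10 segments and roughly 1460 bytes of payload per 1500-byte MTU segment after IP and TCP headers, about 14.6 KB can arrive in the server's first flight, so a 14 * 1024 byte cutoff leaves a little headroom for HTTP response headers while still fitting in one round trip. Below is a minimal standalone sketch of that arithmetic; the header sizes are assumptions for illustration, not values taken from the patch.

#include <cstdio>

int main() {
  // Assumed sizes: IPv4 (20 bytes) + TCP (20 bytes) headers, no options.
  const int kInitialCwndSegments = 10;   // Initial congestion window (IW10).
  const int kMtuBytes = 1500;            // Typical Ethernet MTU.
  const int kIpTcpHeaderBytes = 40;      // Per-segment IP + TCP overhead.
  const int kPayloadPerSegment = kMtuBytes - kIpTcpHeaderBytes;  // ~1460 bytes.
  const int kFirstFlightBytes = kInitialCwndSegments * kPayloadPerSegment;
  // ~14600 bytes fit in the first flight; 14 * 1024 = 14336 stays under that,
  // leaving some room for the HTTP response headers.
  std::printf("first flight ~= %d bytes, cutoff = %d bytes\n",
              kFirstFlightBytes, 14 * 1024);
  return 0;
}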
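The TransactionPattern bookkeeping behaves as a small one-way state machine: PATTERN_UNDEFINED may be classified exactly once, and PATTERN_NOT_COVERED is sticky, so once a transaction is excluded (restart, range request, truncated entry, and so on) no later call can reclassify it. RecordHistograms() then pairs that classification with the timing data (open/doom entry start, send-request time) to emit the HttpCache.* histograms. The following is a simplified, self-contained sketch of the sticky update rule and the percent-before-send arithmetic; the enum ordering, helper name, and millisecond figures are assumptions for illustration, not the patch's actual declarations.

#include <cassert>
#include <cstdio>

// Mirrors the pattern names used in the patch; the real declaration lives in
// http_cache_transaction.h, so the ordering here is assumed.
enum TransactionPattern {
  PATTERN_UNDEFINED,
  PATTERN_NOT_COVERED,
  PATTERN_ENTRY_USED,
  PATTERN_ENTRY_VALIDATED,
  PATTERN_ENTRY_UPDATED,
  PATTERN_ENTRY_NOT_CACHED,
};

// Same rule as UpdateTransactionPattern(): NOT_COVERED is sticky, and an
// already-defined pattern can only be downgraded to NOT_COVERED afterwards.
void UpdatePattern(TransactionPattern* current, TransactionPattern next) {
  if (*current == PATTERN_NOT_COVERED)
    return;
  assert(*current == PATTERN_UNDEFINED || next == PATTERN_NOT_COVERED);
  *current = next;
}

int main() {
  TransactionPattern pattern = PATTERN_UNDEFINED;
  UpdatePattern(&pattern, PATTERN_ENTRY_VALIDATED);  // First classification.
  UpdatePattern(&pattern, PATTERN_NOT_COVERED);      // Later exclusion wins...
  UpdatePattern(&pattern, PATTERN_ENTRY_USED);       // ...and this is ignored.
  assert(pattern == PATTERN_NOT_COVERED);

  // Percent-before-send, using hypothetical millisecond values in place of
  // the patch's TimeTicks/TimeDelta arithmetic.
  const int total_time_ms = 80;    // Open/doom entry until the last read.
  const int before_send_ms = 20;   // Open/doom entry until DoSendRequest().
  const int before_send_percent = before_send_ms * 100 / total_time_ms;
  std::printf("pattern=%d percent_before_send=%d\n",
              static_cast<int>(pattern), before_send_percent);
  return 0;
}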
