Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "chrome/browser/net/predictor.h" | 5 #include "chrome/browser/net/predictor.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <cmath> | 8 #include <cmath> |
| 9 #include <set> | 9 #include <set> |
| 10 #include <sstream> | 10 #include <sstream> |
| 11 | 11 |
| 12 #include "base/command_line.h" | |
| 12 #include "base/compiler_specific.h" | 13 #include "base/compiler_specific.h" |
| 13 #include "base/metrics/histogram.h" | 14 #include "base/metrics/histogram.h" |
| 14 #include "base/stringprintf.h" | 15 #include "base/stringprintf.h" |
| 16 #include "base/synchronization/waitable_event.h" | |
| 15 #include "base/time.h" | 17 #include "base/time.h" |
| 16 #include "base/values.h" | 18 #include "base/values.h" |
| 19 #include "chrome/browser/browser_process.h" | |
| 20 #include "chrome/browser/io_thread.h" | |
| 17 #include "chrome/browser/net/preconnect.h" | 21 #include "chrome/browser/net/preconnect.h" |
| 22 #include "chrome/browser/prefs/browser_prefs.h" | |
| 23 #include "chrome/browser/prefs/pref_service.h" | |
| 24 #include "chrome/browser/prefs/scoped_user_pref_update.h" | |
| 25 #include "chrome/browser/prefs/session_startup_pref.h" | |
| 26 #include "chrome/browser/profiles/profile.h" | |
| 27 #include "chrome/common/chrome_switches.h" | |
| 28 #include "chrome/common/pref_names.h" | |
| 18 #include "content/browser/browser_thread.h" | 29 #include "content/browser/browser_thread.h" |
| 19 #include "net/base/address_list.h" | 30 #include "net/base/address_list.h" |
| 20 #include "net/base/completion_callback.h" | 31 #include "net/base/completion_callback.h" |
| 21 #include "net/base/host_port_pair.h" | 32 #include "net/base/host_port_pair.h" |
| 22 #include "net/base/host_resolver.h" | 33 #include "net/base/host_resolver.h" |
| 23 #include "net/base/net_errors.h" | 34 #include "net/base/net_errors.h" |
| 24 #include "net/base/net_log.h" | 35 #include "net/base/net_log.h" |
| 25 #include "net/base/single_request_host_resolver.h" | 36 #include "net/base/single_request_host_resolver.h" |
| 26 | 37 |
| 27 using base::TimeDelta; | 38 using base::TimeDelta; |
| 28 | 39 |
| 29 namespace chrome_browser_net { | 40 namespace chrome_browser_net { |
| 30 | 41 |
| 42 static void DnsPrefetchMotivatedList(const UrlList& urls, | |
| 43 UrlInfo::ResolutionMotivation motivation); | |
| 31 // static | 44 // static |
| 32 const double Predictor::kPreconnectWorthyExpectedValue = 0.8; | 45 const double Predictor::kPreconnectWorthyExpectedValue = 0.8; |
| 33 // static | 46 // static |
| 34 const double Predictor::kDNSPreresolutionWorthyExpectedValue = 0.1; | 47 const double Predictor::kDNSPreresolutionWorthyExpectedValue = 0.1; |
| 35 // static | 48 // static |
| 36 const double Predictor::kDiscardableExpectedValue = 0.05; | 49 const double Predictor::kDiscardableExpectedValue = 0.05; |
| 37 // The goal of trimming is to reduce the importance (number of expected | 50 // The goal of trimming is to reduce the importance (number of expected |
| 38 // subresources needed) by a factor of 2 after about 24 hours of uptime. We will | 51 // subresources needed) by a factor of 2 after about 24 hours of uptime. We will |
| 39 // trim roughly once-an-hour of uptime. The ratio to use in each trim operation | 52 // trim roughly once-an-hour of uptime. The ratio to use in each trim operation |
| 40 // is then the 24th root of 0.5. If a user only surfs for 4 hours a day, then | 53 // is then the 24th root of 0.5. If a user only surfs for 4 hours a day, then |
| 41 // after about 6 days they will have halved all their estimates of subresource | 54 // after about 6 days they will have halved all their estimates of subresource |
| 42 // connections. Once this falls below kDiscardableExpectedValue the referrer | 55 // connections. Once this falls below kDiscardableExpectedValue the referrer |
| 43 // will be discarded. | 56 // will be discarded. |
| 44 // TODO(jar): Measure size of referrer lists in the field. Consider an adaptive | 57 // TODO(jar): Measure size of referrer lists in the field. Consider an adaptive |
| 45 // system that uses a higher trim ratio when the list is large. | 58 // system that uses a higher trim ratio when the list is large. |
| 46 // static | 59 // static |
| 47 const double Predictor::kReferrerTrimRatio = 0.97153; | 60 const double Predictor::kReferrerTrimRatio = 0.97153; |
| 48 | 61 |
| 49 // static | 62 // static |
| 50 const TimeDelta Predictor::kDurationBetweenTrimmings = TimeDelta::FromHours(1); | 63 const TimeDelta Predictor::kDurationBetweenTrimmings = TimeDelta::FromHours(1); |
| 51 // static | 64 // static |
| 52 const TimeDelta Predictor::kDurationBetweenTrimmingIncrements = | 65 const TimeDelta Predictor::kDurationBetweenTrimmingIncrements = |
| 53 TimeDelta::FromSeconds(15); | 66 TimeDelta::FromSeconds(15); |
| 54 // static | 67 // static |
| 55 const size_t Predictor::kUrlsTrimmedPerIncrement = 5u; | 68 const size_t Predictor::kUrlsTrimmedPerIncrement = 5u; |
| 69 // static | |
| 70 const size_t Predictor::kMaxSpeculativeParallelResolves = 3; | |
| 71 // To control our congestion avoidance system, which discards a queue when | |
| 72 // resolutions are "taking too long," we need an expected resolution time. | |
| 73 // Common average is in the range of 300-500ms. | |
| 74 const int kExpectedResolutionTimeMs = 500; | |
| 75 // static | |
| 76 const int Predictor::kTypicalSpeculativeGroupSize = 8; | |
| 77 // static | |
| 78 const int Predictor::kMaxSpeculativeResolveQueueDelayMs = | |
| 79 (kExpectedResolutionTimeMs * Predictor::kTypicalSpeculativeGroupSize) / | |
| 80 Predictor::kMaxSpeculativeParallelResolves; | |
| 81 | |
| 82 static int g_max_queueing_delay_ms; | |
| 83 static size_t g_max_parallel_resolves = 0u; | |
| 84 | |
| 85 // A version number for prefs that are saved. This should be incremented when | |
| 86 // we change the format so that we discard old data. | |
| 87 static const int kPredictorStartupFormatVersion = 1; | |
| 56 | 88 |
| 57 class Predictor::LookupRequest { | 89 class Predictor::LookupRequest { |
| 58 public: | 90 public: |
| 59 LookupRequest(Predictor* predictor, | 91 LookupRequest(Predictor* predictor, |
| 60 net::HostResolver* host_resolver, | 92 net::HostResolver* host_resolver, |
| 61 const GURL& url) | 93 const GURL& url) |
| 62 : ALLOW_THIS_IN_INITIALIZER_LIST( | 94 : ALLOW_THIS_IN_INITIALIZER_LIST( |
| 63 net_callback_(this, &LookupRequest::OnLookupFinished)), | 95 net_callback_(this, &LookupRequest::OnLookupFinished)), |
| 64 predictor_(predictor), | 96 predictor_(predictor), |
| 65 url_(url), | 97 url_(url), |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 92 | 124 |
| 93 Predictor* predictor_; // The predictor which started us. | 125 Predictor* predictor_; // The predictor which started us. |
| 94 | 126 |
| 95 const GURL url_; // Hostname to resolve. | 127 const GURL url_; // Hostname to resolve. |
| 96 net::SingleRequestHostResolver resolver_; | 128 net::SingleRequestHostResolver resolver_; |
| 97 net::AddressList addresses_; | 129 net::AddressList addresses_; |
| 98 | 130 |
| 99 DISALLOW_COPY_AND_ASSIGN(LookupRequest); | 131 DISALLOW_COPY_AND_ASSIGN(LookupRequest); |
| 100 }; | 132 }; |
| 101 | 133 |
| 102 Predictor::Predictor(net::HostResolver* host_resolver, | 134 Predictor::Predictor() |
| 103 TimeDelta max_dns_queue_delay, | 135 : predictor_enabled_(true), |
| 104 size_t max_concurrent, | 136 peak_pending_lookups_(0), |
| 105 bool preconnect_enabled) | |
| 106 : peak_pending_lookups_(0), | |
| 107 shutdown_(false), | 137 shutdown_(false), |
| 108 max_concurrent_dns_lookups_(max_concurrent), | 138 max_concurrent_dns_lookups_(g_max_parallel_resolves), |
| 109 max_dns_queue_delay_(max_dns_queue_delay), | 139 max_dns_queue_delay_( |
| 110 host_resolver_(host_resolver), | 140 TimeDelta::FromMilliseconds(g_max_queueing_delay_ms)), |
| 111 preconnect_enabled_(preconnect_enabled), | 141 host_resolver_(NULL), |
| 142 preconnect_enabled_(true), | |
| 112 consecutive_omnibox_preconnect_count_(0), | 143 consecutive_omnibox_preconnect_count_(0), |
| 113 next_trim_time_(base::TimeTicks::Now() + kDurationBetweenTrimmings), | 144 next_trim_time_(base::TimeTicks::Now() + kDurationBetweenTrimmings) { |
| 114 ALLOW_THIS_IN_INITIALIZER_LIST(trim_task_factory_(this)) { | 145 initial_observer_.reset(new InitialObserver()); |
|
jar (doing other things)
2011/08/16 01:19:09
Other code (for finalizing initialization) suggest
rpetterson
2011/08/16 03:52:12
I've added a DCHECK that the Predictor constructor
| |
| 146 | |
| 147 const CommandLine* command_line = CommandLine::ForCurrentProcess(); | |
| 148 if (command_line->HasSwitch(switches::kDisablePreconnect)) | |
| 149 preconnect_enabled_ = false; | |
| 150 else if (command_line->HasSwitch(switches::kEnablePreconnect)) | |
| 151 preconnect_enabled_ = true; | |
| 115 } | 152 } |
| 116 | 153 |
| 117 Predictor::~Predictor() { | 154 Predictor::~Predictor() { |
| 118 DCHECK(shutdown_); | 155 DCHECK(shutdown_); |
| 119 } | 156 } |
| 120 | 157 |
| 121 void Predictor::Shutdown() { | 158 // --------------------- Start UI methods. ------------------------------------ |
| 122 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 123 DCHECK(!shutdown_); | |
| 124 shutdown_ = true; | |
| 125 | 159 |
| 126 std::set<LookupRequest*>::iterator it; | 160 void Predictor::InitNetworkPredictor(PrefService* user_prefs) { |
| 127 for (it = pending_lookups_.begin(); it != pending_lookups_.end(); ++it) | 161 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); |
| 128 delete *it; | 162 |
| 163 predictor_enabled_ = user_prefs->GetBoolean(prefs::kNetworkPredictionEnabled); | |
| 164 | |
| 165 // Gather the list of hostnames to prefetch on startup. | |
| 166 PrefService* local_state = g_browser_process->local_state(); | |
| 167 UrlList urls = GetPredictedUrlListAtStartup(user_prefs, local_state); | |
| 168 | |
| 169 base::ListValue* referral_list = | |
| 170 static_cast<base::ListValue*>(user_prefs->GetList( | |
| 171 prefs::kDnsPrefetchingHostReferralList)->DeepCopy()); | |
| 172 | |
| 173 // Remove obsolete preferences from local state if necessary. | |
| 174 int current_version = | |
| 175 local_state->GetInteger(prefs::kMultipleProfilePrefMigration); | |
| 176 if ((current_version & browser::DNS_PREFS) == 0) { | |
| 177 local_state->RegisterListPref(prefs::kDnsStartupPrefetchList, | |
| 178 PrefService::UNSYNCABLE_PREF); | |
| 179 local_state->RegisterListPref(prefs::kDnsHostReferralList, | |
| 180 PrefService::UNSYNCABLE_PREF); | |
| 181 local_state->ClearPref(prefs::kDnsStartupPrefetchList); | |
| 182 local_state->ClearPref(prefs::kDnsHostReferralList); | |
| 183 local_state->SetInteger(prefs::kMultipleProfilePrefMigration, | |
| 184 current_version | browser::DNS_PREFS); | |
| 185 } | |
| 186 | |
| 187 g_browser_process->io_thread()->InitNetworkPredictor( | |
| 188 urls, referral_list, this); | |
| 129 } | 189 } |
| 130 | 190 |
| 131 // Overloaded Resolve() to take a vector of names. | 191 void Predictor::AnticipateOmniboxUrl(const GURL& url, bool preconnectable) { |
| 132 void Predictor::ResolveList(const UrlList& urls, | 192 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); |
| 133 UrlInfo::ResolutionMotivation motivation) { | 193 if (!predictor_enabled_) |
| 134 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 135 | |
| 136 for (UrlList::const_iterator it = urls.begin(); it < urls.end(); ++it) { | |
| 137 AppendToResolutionQueue(*it, motivation); | |
| 138 } | |
| 139 } | |
| 140 | |
| 141 // Basic Resolve() takes an individual name, and adds it | |
| 142 // to the queue. | |
| 143 void Predictor::Resolve(const GURL& url, | |
| 144 UrlInfo::ResolutionMotivation motivation) { | |
| 145 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 146 if (!url.has_host()) | |
| 147 return; | 194 return; |
| 148 AppendToResolutionQueue(url, motivation); | 195 if (!url.is_valid() || !url.has_host()) |
| 149 } | 196 return; |
| 150 | |
| 151 void Predictor::LearnFromNavigation(const GURL& referring_url, | |
| 152 const GURL& target_url) { | |
| 153 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 154 DCHECK_EQ(referring_url, Predictor::CanonicalizeUrl(referring_url)); | |
| 155 DCHECK_NE(referring_url, GURL::EmptyGURL()); | |
| 156 DCHECK_EQ(target_url, Predictor::CanonicalizeUrl(target_url)); | |
| 157 DCHECK_NE(target_url, GURL::EmptyGURL()); | |
| 158 | |
| 159 referrers_[referring_url].SuggestHost(target_url); | |
| 160 // Possibly do some referrer trimming. | |
| 161 TrimReferrers(); | |
| 162 } | |
| 163 | |
| 164 enum SubresourceValue { | |
| 165 PRECONNECTION, | |
| 166 PRERESOLUTION, | |
| 167 TOO_NEW, | |
| 168 SUBRESOURCE_VALUE_MAX | |
| 169 }; | |
| 170 | |
| 171 void Predictor::AnticipateOmniboxUrl(const GURL& url, bool preconnectable) { | |
| 172 std::string host = url.HostNoBrackets(); | 197 std::string host = url.HostNoBrackets(); |
| 173 bool is_new_host_request = (host != last_omnibox_host_); | 198 bool is_new_host_request = (host != last_omnibox_host_); |
| 174 last_omnibox_host_ = host; | 199 last_omnibox_host_ = host; |
| 175 | 200 |
| 176 UrlInfo::ResolutionMotivation motivation(UrlInfo::OMNIBOX_MOTIVATED); | 201 UrlInfo::ResolutionMotivation motivation(UrlInfo::OMNIBOX_MOTIVATED); |
| 177 base::TimeTicks now = base::TimeTicks::Now(); | 202 base::TimeTicks now = base::TimeTicks::Now(); |
| 178 | 203 |
| 179 if (preconnect_enabled()) { | 204 if (preconnect_enabled()) { |
| 180 if (preconnectable && !is_new_host_request) { | 205 if (preconnectable && !is_new_host_request) { |
| 181 ++consecutive_omnibox_preconnect_count_; | 206 ++consecutive_omnibox_preconnect_count_; |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 230 | 255 |
| 231 // Perform at least DNS pre-resolution. | 256 // Perform at least DNS pre-resolution. |
| 232 BrowserThread::PostTask( | 257 BrowserThread::PostTask( |
| 233 BrowserThread::IO, | 258 BrowserThread::IO, |
| 234 FROM_HERE, | 259 FROM_HERE, |
| 235 NewRunnableMethod(this, &Predictor::Resolve, CanonicalizeUrl(url), | 260 NewRunnableMethod(this, &Predictor::Resolve, CanonicalizeUrl(url), |
| 236 motivation)); | 261 motivation)); |
| 237 } | 262 } |
| 238 | 263 |
| 239 void Predictor::PreconnectUrlAndSubresources(const GURL& url) { | 264 void Predictor::PreconnectUrlAndSubresources(const GURL& url) { |
| 265 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); | |
| 266 if (!predictor_enabled_) | |
| 267 return; | |
| 268 if (!url.is_valid() || !url.has_host()) | |
| 269 return; | |
| 240 if (preconnect_enabled()) { | 270 if (preconnect_enabled()) { |
| 241 std::string host = url.HostNoBrackets(); | 271 std::string host = url.HostNoBrackets(); |
| 242 UrlInfo::ResolutionMotivation motivation(UrlInfo::EARLY_LOAD_MOTIVATED); | 272 UrlInfo::ResolutionMotivation motivation(UrlInfo::EARLY_LOAD_MOTIVATED); |
| 243 const int kConnectionsNeeded = 1; | 273 const int kConnectionsNeeded = 1; |
| 244 PreconnectOnUIThread(CanonicalizeUrl(url), motivation, | 274 PreconnectOnUIThread(CanonicalizeUrl(url), motivation, |
| 245 kConnectionsNeeded); | 275 kConnectionsNeeded); |
| 246 PredictFrameSubresources(url.GetWithEmptyPath()); | 276 PredictFrameSubresources(url.GetWithEmptyPath()); |
| 247 } | 277 } |
| 248 } | 278 } |
| 249 | 279 |
| 250 void Predictor::PredictFrameSubresources(const GURL& url) { | 280 void Predictor::PredictFrameSubresources(const GURL& url) { |
| 281 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); | |
| 282 if (!predictor_enabled_) | |
| 283 return; | |
| 251 DCHECK_EQ(url.GetWithEmptyPath(), url); | 284 DCHECK_EQ(url.GetWithEmptyPath(), url); |
| 252 // Add one pass through the message loop to allow current navigation to | 285 // Add one pass through the message loop to allow current navigation to |
| 253 // proceed. | 286 // proceed. |
| 254 BrowserThread::PostTask( | 287 BrowserThread::PostTask( |
| 255 BrowserThread::IO, | 288 BrowserThread::IO, |
| 256 FROM_HERE, | 289 FROM_HERE, |
| 257 NewRunnableMethod(this, &Predictor::PrepareFrameSubresources, url)); | 290 NewRunnableMethod(this, &Predictor::PrepareFrameSubresources, url)); |
| 258 } | 291 } |
| 259 | 292 |
| 260 void Predictor::PrepareFrameSubresources(const GURL& url) { | 293 UrlList Predictor::GetPredictedUrlListAtStartup( |
| 261 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 294 PrefService* user_prefs, |
| 262 DCHECK_EQ(url.GetWithEmptyPath(), url); | 295 PrefService* local_state) { |
| 263 Referrers::iterator it = referrers_.find(url); | 296 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); |
| 264 if (referrers_.end() == it) { | 297 UrlList urls; |
| 265 // Only when we don't know anything about this url, make 2 connections | 298 // Recall list of URLs we learned about during last session. |
| 266 // available. We could do this completely via learning (by prepopulating | 299 // This may catch secondary hostnames, pulled in by the homepages. It will |
| 267 // the referrer_ list with this expected value), but it would swell the | 300 // also catch more of the "primary" home pages, since that was (presumably) |
| 268 // size of the list with all the "Leaf" nodes in the tree (nodes that don't | 301 // rendered first (and will be rendered first this time too). |
| 269 // load any subresources). If we learn about this resource, we will instead | 302 const ListValue* startup_list = |
| 270 // provide a more carefully estimated preconnection count. | 303 user_prefs->GetList(prefs::kDnsPrefetchingStartupList); |
| 271 if (preconnect_enabled_) | 304 |
| 272 PreconnectOnIOThread(url, UrlInfo::SELF_REFERAL_MOTIVATED, 2); | 305 if (startup_list) { |
| 273 return; | 306 base::ListValue::const_iterator it = startup_list->begin(); |
| 307 int format_version = -1; | |
| 308 if (it != startup_list->end() && | |
| 309 (*it)->GetAsInteger(&format_version) && | |
| 310 format_version == kPredictorStartupFormatVersion) { | |
| 311 ++it; | |
| 312 for (; it != startup_list->end(); ++it) { | |
| 313 std::string url_spec; | |
| 314 if (!(*it)->GetAsString(&url_spec)) { | |
| 315 LOG(DFATAL); | |
| 316 break; // Format incompatibility. | |
| 317 } | |
| 318 GURL url(url_spec); | |
| 319 if (!url.has_host() || !url.has_scheme()) { | |
| 320 LOG(DFATAL); | |
| 321 break; // Format incompatibility. | |
| 322 } | |
| 323 | |
| 324 urls.push_back(url); | |
| 325 } | |
| 326 } | |
| 274 } | 327 } |
| 275 | 328 |
| 276 Referrer* referrer = &(it->second); | 329 // Prepare for any static home page(s) the user has in prefs. The user may |
| 277 referrer->IncrementUseCount(); | 330 // have a LOT of tab's specified, so we may as well try to warm them all. |
| 278 const UrlInfo::ResolutionMotivation motivation = | 331 SessionStartupPref tab_start_pref = |
| 279 UrlInfo::LEARNED_REFERAL_MOTIVATED; | 332 SessionStartupPref::GetStartupPref(user_prefs); |
| 280 for (Referrer::iterator future_url = referrer->begin(); | 333 if (SessionStartupPref::URLS == tab_start_pref.type) { |
| 281 future_url != referrer->end(); ++future_url) { | 334 for (size_t i = 0; i < tab_start_pref.urls.size(); i++) { |
| 282 SubresourceValue evalution(TOO_NEW); | 335 GURL gurl = tab_start_pref.urls[i]; |
| 283 double connection_expectation = future_url->second.subresource_use_rate(); | 336 if (!gurl.is_valid() || gurl.SchemeIsFile() || gurl.host().empty()) |
| 284 UMA_HISTOGRAM_CUSTOM_COUNTS("Net.PreconnectSubresourceExpectation", | 337 continue; |
| 285 static_cast<int>(connection_expectation * 100), | 338 if (gurl.SchemeIs("http") || gurl.SchemeIs("https")) |
| 286 10, 5000, 50); | 339 urls.push_back(gurl.GetWithEmptyPath()); |
| 287 future_url->second.ReferrerWasObserved(); | |
| 288 if (preconnect_enabled_ && | |
| 289 connection_expectation > kPreconnectWorthyExpectedValue) { | |
| 290 evalution = PRECONNECTION; | |
| 291 future_url->second.IncrementPreconnectionCount(); | |
| 292 int count = static_cast<int>(std::ceil(connection_expectation)); | |
| 293 if (url.host() == future_url->first.host()) | |
| 294 ++count; | |
| 295 PreconnectOnIOThread(future_url->first, motivation, count); | |
| 296 } else if (connection_expectation > kDNSPreresolutionWorthyExpectedValue) { | |
| 297 evalution = PRERESOLUTION; | |
| 298 future_url->second.preresolution_increment(); | |
| 299 UrlInfo* queued_info = AppendToResolutionQueue(future_url->first, | |
| 300 motivation); | |
| 301 if (queued_info) | |
| 302 queued_info->SetReferringHostname(url); | |
| 303 } | 340 } |
| 304 UMA_HISTOGRAM_ENUMERATION("Net.PreconnectSubresourceEval", evalution, | |
| 305 SUBRESOURCE_VALUE_MAX); | |
| 306 } | 341 } |
| 342 | |
| 343 if (urls.empty()) | |
| 344 urls.push_back(GURL("http://www.google.com:80")); | |
| 345 | |
| 346 return urls; | |
| 347 } | |
| 348 | |
| 349 void Predictor::set_max_queueing_delay(int max_queueing_delay_ms) { | |
| 350 g_max_queueing_delay_ms = max_queueing_delay_ms; | |
| 351 } | |
| 352 | |
| 353 void Predictor::set_max_parallel_resolves(size_t max_parallel_resolves) { | |
| 354 g_max_parallel_resolves = max_parallel_resolves; | |
| 355 } | |
| 356 | |
| 357 | |
| 358 void Predictor::RegisterUserPrefs(PrefService* user_prefs) { | |
| 359 user_prefs->RegisterListPref(prefs::kDnsPrefetchingStartupList, | |
| 360 PrefService::UNSYNCABLE_PREF); | |
| 361 user_prefs->RegisterListPref(prefs::kDnsPrefetchingHostReferralList, | |
| 362 PrefService::UNSYNCABLE_PREF); | |
| 363 } | |
| 364 | |
| 365 // ---------------------- End UI methods. ------------------------------------- | |
| 366 | |
| 367 // --------------------- Start IO methods. ------------------------------------ | |
| 368 | |
| 369 void Predictor::Shutdown() { | |
| 370 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 371 DCHECK(!shutdown_); | |
| 372 shutdown_ = true; | |
| 373 | |
| 374 std::set<LookupRequest*>::iterator it; | |
| 375 for (it = pending_lookups_.begin(); it != pending_lookups_.end(); ++it) | |
| 376 delete *it; | |
| 377 } | |
| 378 | |
| 379 void Predictor::DiscardAllResults() { | |
| 380 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 381 // Delete anything listed so far in this session that shows in about:dns. | |
| 382 referrers_.clear(); | |
| 383 | |
| 384 | |
| 385 // Try to delete anything in our work queue. | |
| 386 while (!work_queue_.IsEmpty()) { | |
| 387 // Emulate processing cycle as though host was not found. | |
| 388 GURL url = work_queue_.Pop(); | |
| 389 UrlInfo* info = &results_[url]; | |
| 390 DCHECK(info->HasUrl(url)); | |
| 391 info->SetAssignedState(); | |
| 392 info->SetNoSuchNameState(); | |
| 393 } | |
| 394 // Now every result_ is either resolved, or is being resolved | |
| 395 // (see LookupRequest). | |
| 396 | |
| 397 // Step through result_, recording names of all hosts that can't be erased. | |
| 398 // We can't erase anything being worked on. | |
| 399 Results assignees; | |
| 400 for (Results::iterator it = results_.begin(); results_.end() != it; ++it) { | |
| 401 GURL url(it->first); | |
| 402 UrlInfo* info = &it->second; | |
| 403 DCHECK(info->HasUrl(url)); | |
| 404 if (info->is_assigned()) { | |
| 405 info->SetPendingDeleteState(); | |
| 406 assignees[url] = *info; | |
| 407 } | |
| 408 } | |
| 409 DCHECK(assignees.size() <= max_concurrent_dns_lookups_); | |
| 410 results_.clear(); | |
| 411 // Put back in the names being worked on. | |
| 412 for (Results::iterator it = assignees.begin(); assignees.end() != it; ++it) { | |
| 413 DCHECK(it->second.is_marked_to_delete()); | |
| 414 results_[it->first] = it->second; | |
| 415 } | |
| 416 } | |
| 417 | |
| 418 // Overloaded Resolve() to take a vector of names. | |
| 419 void Predictor::ResolveList(const UrlList& urls, | |
| 420 UrlInfo::ResolutionMotivation motivation) { | |
| 421 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 422 | |
| 423 for (UrlList::const_iterator it = urls.begin(); it < urls.end(); ++it) { | |
| 424 AppendToResolutionQueue(*it, motivation); | |
| 425 } | |
| 426 } | |
| 427 | |
| 428 // Basic Resolve() takes an individual name, and adds it | |
| 429 // to the queue. | |
| 430 void Predictor::Resolve(const GURL& url, | |
| 431 UrlInfo::ResolutionMotivation motivation) { | |
| 432 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 433 if (!url.has_host()) | |
| 434 return; | |
| 435 AppendToResolutionQueue(url, motivation); | |
| 436 } | |
| 437 | |
| 438 void Predictor::LearnFromNavigation(const GURL& referring_url, | |
| 439 const GURL& target_url) { | |
| 440 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 441 if (!predictor_enabled_) | |
| 442 return; | |
| 443 DCHECK_EQ(referring_url, Predictor::CanonicalizeUrl(referring_url)); | |
| 444 DCHECK_NE(referring_url, GURL::EmptyGURL()); | |
| 445 DCHECK_EQ(target_url, Predictor::CanonicalizeUrl(target_url)); | |
| 446 DCHECK_NE(target_url, GURL::EmptyGURL()); | |
| 447 | |
| 448 referrers_[referring_url].SuggestHost(target_url); | |
| 449 // Possibly do some referrer trimming. | |
| 450 TrimReferrers(); | |
| 307 } | 451 } |
| 308 | 452 |
| 309 // Provide sort order so all .com's are together, etc. | 453 // Provide sort order so all .com's are together, etc. |
| 310 struct RightToLeftStringSorter { | 454 struct RightToLeftStringSorter { |
| 311 bool operator()(const GURL& left, | 455 bool operator()(const GURL& left, |
| 312 const GURL& right) const { | 456 const GURL& right) const { |
| 313 return string_compare(left.host(), right.host()); | 457 return string_compare(left.host(), right.host()); |
| 314 } | 458 } |
| 315 | 459 |
| 316 static bool string_compare(const std::string& left_host, | 460 static bool string_compare(const std::string& left_host, |
| (...skipping 94 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 411 static_cast<int>(future_url->second.preresolution_count()), | 555 static_cast<int>(future_url->second.preresolution_count()), |
| 412 static_cast<double>(future_url->second.subresource_use_rate()), | 556 static_cast<double>(future_url->second.subresource_use_rate()), |
| 413 future_url->first.spec().c_str()); | 557 future_url->first.spec().c_str()); |
| 414 } | 558 } |
| 415 } | 559 } |
| 416 output->append("</table>"); | 560 output->append("</table>"); |
| 417 } | 561 } |
| 418 | 562 |
| 419 void Predictor::GetHtmlInfo(std::string* output) { | 563 void Predictor::GetHtmlInfo(std::string* output) { |
| 420 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 564 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 565 if (initial_observer_.get()) | |
| 566 initial_observer_->GetFirstResolutionsHtml(output); | |
| 567 // Show list of subresource predictions and stats. | |
| 568 GetHtmlReferrerLists(output); | |
| 569 | |
| 421 // Local lists for calling UrlInfo | 570 // Local lists for calling UrlInfo |
| 422 UrlInfo::UrlInfoTable name_not_found; | 571 UrlInfo::UrlInfoTable name_not_found; |
| 423 UrlInfo::UrlInfoTable name_preresolved; | 572 UrlInfo::UrlInfoTable name_preresolved; |
| 424 | 573 |
| 425 // Get copies of all useful data. | 574 // Get copies of all useful data. |
| 426 typedef std::map<GURL, UrlInfo, RightToLeftStringSorter> SortedUrlInfo; | 575 typedef std::map<GURL, UrlInfo, RightToLeftStringSorter> SortedUrlInfo; |
| 427 SortedUrlInfo snapshot; | 576 SortedUrlInfo snapshot; |
| 428 // UrlInfo supports value semantics, so we can do a shallow copy. | 577 // UrlInfo supports value semantics, so we can do a shallow copy. |
| 429 for (Results::iterator it(results_.begin()); it != results_.end(); it++) | 578 for (Results::iterator it(results_.begin()); it != results_.end(); it++) |
| 430 snapshot[it->first] = it->second; | 579 snapshot[it->first] = it->second; |
| (...skipping 15 matching lines...) Expand all Loading... | |
| 446 brief = true; | 595 brief = true; |
| 447 #endif // NDEBUG | 596 #endif // NDEBUG |
| 448 | 597 |
| 449 // Call for display of each table, along with title. | 598 // Call for display of each table, along with title. |
| 450 UrlInfo::GetHtmlTable(name_preresolved, | 599 UrlInfo::GetHtmlTable(name_preresolved, |
| 451 "Preresolution DNS records performed for ", brief, output); | 600 "Preresolution DNS records performed for ", brief, output); |
| 452 UrlInfo::GetHtmlTable(name_not_found, | 601 UrlInfo::GetHtmlTable(name_not_found, |
| 453 "Preresolving DNS records revealed non-existence for ", brief, output); | 602 "Preresolving DNS records revealed non-existence for ", brief, output); |
| 454 } | 603 } |
| 455 | 604 |
| 456 UrlInfo* Predictor::AppendToResolutionQueue( | 605 void Predictor::TrimReferrersNow() { |
| 457 const GURL& url, | 606 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 607 // Just finish up work if an incremental trim is in progress. | |
| 608 if (urls_being_trimmed_.empty()) | |
| 609 LoadUrlsForTrimming(); | |
| 610 IncrementalTrimReferrers(true); // Do everything now. | |
| 611 } | |
| 612 | |
| 613 void Predictor::SerializeReferrers(base::ListValue* referral_list) { | |
| 614 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 615 referral_list->Clear(); | |
| 616 referral_list->Append(new FundamentalValue(PREDICTOR_REFERRER_VERSION)); | |
| 617 for (Referrers::const_iterator it = referrers_.begin(); | |
| 618 it != referrers_.end(); ++it) { | |
| 619 // Serialize the list of subresource names. | |
| 620 Value* subresource_list(it->second.Serialize()); | |
| 621 | |
| 622 // Create a list for each referrer. | |
| 623 base::ListValue* motivator(new base::ListValue); | |
| 624 motivator->Append(new StringValue(it->first.spec())); | |
| 625 motivator->Append(subresource_list); | |
| 626 | |
| 627 referral_list->Append(motivator); | |
| 628 } | |
| 629 } | |
| 630 | |
| 631 void Predictor::DeserializeReferrers(const base::ListValue& referral_list) { | |
| 632 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 633 int format_version = -1; | |
| 634 if (referral_list.GetSize() > 0 && | |
| 635 referral_list.GetInteger(0, &format_version) && | |
| 636 format_version == PREDICTOR_REFERRER_VERSION) { | |
| 637 for (size_t i = 1; i < referral_list.GetSize(); ++i) { | |
| 638 base::ListValue* motivator; | |
| 639 if (!referral_list.GetList(i, &motivator)) { | |
| 640 NOTREACHED(); | |
| 641 return; | |
| 642 } | |
| 643 std::string motivating_url_spec; | |
| 644 if (!motivator->GetString(0, &motivating_url_spec)) { | |
| 645 NOTREACHED(); | |
| 646 return; | |
| 647 } | |
| 648 | |
| 649 Value* subresource_list; | |
| 650 if (!motivator->Get(1, &subresource_list)) { | |
| 651 NOTREACHED(); | |
| 652 return; | |
| 653 } | |
| 654 | |
| 655 referrers_[GURL(motivating_url_spec)].Deserialize(*subresource_list); | |
| 656 } | |
| 657 } | |
| 658 } | |
| 659 | |
| 660 void Predictor::DeserializeReferrersThenDelete( | |
| 661 base::ListValue* referral_list) { | |
| 662 DeserializeReferrers(*referral_list); | |
| 663 delete referral_list; | |
| 664 } | |
| 665 | |
| 666 void Predictor::DiscardInitialNavigationHistory() { | |
| 667 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 668 if (initial_observer_.get()) | |
| 669 initial_observer_->DiscardInitialNavigationHistory(); | |
| 670 } | |
| 671 | |
| 672 void Predictor::FinalizeInitializationOnIOThread( | |
| 673 const UrlList& startup_urls, | |
| 674 base::ListValue* referral_list, | |
| 675 net::HostResolver* host_resolver) { | |
| 676 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 677 | |
| 678 host_resolver_ = host_resolver; | |
| 679 | |
| 680 // ScopedRunnableMethodFactory instances need to be created and destroyed | |
| 681 // on the same thread. The predictor lives on the IO thread and will die | |
| 682 // from there so now that we're on the IO thread we need to properly | |
| 683 // initialize the ScopedRunnableMethodFactory. | |
| 684 trim_task_factory_.reset(new ScopedRunnableMethodFactory<Predictor>(this)); | |
| 685 | |
| 686 // Prefetch these hostnames on startup. | |
| 687 DnsPrefetchMotivatedList(startup_urls, UrlInfo::STARTUP_LIST_MOTIVATED); | |
| 688 DeserializeReferrersThenDelete(referral_list); | |
| 689 } | |
| 690 | |
| 691 //----------------------------------------------------------------------------- | |
| 692 // This section intermingles prefetch results with actual browser HTTP | |
| 693 // network activity. It supports calculating the benefit of a prefetch, as | |
| 694 // well as recording what prefetched hostname resolutions might be potentially | |
| 695 // helpful during the next chrome-startup. | |
| 696 //----------------------------------------------------------------------------- | |
| 697 | |
| 698 void Predictor::LearnAboutInitialNavigation(const GURL& url) { | |
| 699 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 700 if (!predictor_enabled_ || NULL == initial_observer_.get() ) | |
| 701 return; | |
| 702 initial_observer_->Append(url, this); | |
| 703 } | |
| 704 | |
| 705 // This API is only used in the browser process. | |
| 706 // It is called from an IPC message originating in the renderer. It currently | |
| 707 // includes both Page-Scan, and Link-Hover prefetching. | |
| 708 // TODO(jar): Separate out link-hover prefetching, and page-scan results. | |
| 709 void Predictor::DnsPrefetchList(const NameList& hostnames) { | |
| 710 // TODO(jar): Push GURL transport further back into renderer, but this will | |
| 711 // require a Webkit change in the observer :-/. | |
| 712 UrlList urls; | |
| 713 for (NameList::const_iterator it = hostnames.begin(); | |
| 714 it < hostnames.end(); | |
| 715 ++it) { | |
| 716 urls.push_back(GURL("http://" + *it + ":80")); | |
| 717 } | |
| 718 | |
| 719 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 720 DnsPrefetchMotivatedList(urls, UrlInfo::PAGE_SCAN_MOTIVATED); | |
| 721 } | |
| 722 | |
| 723 void Predictor::DnsPrefetchMotivatedList( | |
| 724 const UrlList& urls, | |
| 458 UrlInfo::ResolutionMotivation motivation) { | 725 UrlInfo::ResolutionMotivation motivation) { |
| 459 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 726 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI) || |
| 460 DCHECK(url.has_host()); | 727 BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 461 | 728 if (!predictor_enabled_) |
| 462 if (shutdown_) | 729 return; |
| 463 return NULL; | 730 |
| 464 | 731 if (BrowserThread::CurrentlyOn(BrowserThread::IO)) { |
| 465 UrlInfo* info = &results_[url]; | 732 ResolveList(urls, motivation); |
| 466 info->SetUrl(url); // Initialize or DCHECK. | 733 } else { |
| 467 // TODO(jar): I need to discard names that have long since expired. | 734 BrowserThread::PostTask( |
| 468 // Currently we only add to the domain map :-/ | 735 BrowserThread::IO, |
| 469 | 736 FROM_HERE, |
| 470 DCHECK(info->HasUrl(url)); | 737 NewRunnableMethod(this, &Predictor::ResolveList, urls, motivation)); |
|
willchan no longer on Chromium
2011/08/10 07:04:08
As I mentioned elsewhere, instead of NewRunnableMe
rpetterson
2011/08/12 03:12:36
Done. Elsewhere as well.
| |
| 471 | 738 } |
| 472 if (!info->NeedsDnsUpdate()) { | 739 } |
| 473 info->DLogResultsStats("DNS PrefetchNotUpdated"); | 740 //----------------------------------------------------------------------------- |
| 474 return NULL; | 741 // Functions to handle saving of hostnames from one session to the next, to |
| 475 } | 742 // expedite startup times. |
| 476 | 743 |
| 477 info->SetQueuedState(motivation); | 744 static void SaveDnsPrefetchStateForNextStartupAndTrimOnIOThread( |
| 478 work_queue_.Push(url, motivation); | 745 base::ListValue* startup_list, |
| 479 StartSomeQueuedResolutions(); | 746 base::ListValue* referral_list, |
| 480 return info; | 747 base::WaitableEvent* completion, |
| 481 } | 748 Predictor* predictor) { |
| 482 | 749 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 483 void Predictor::StartSomeQueuedResolutions() { | 750 |
| 484 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 751 if (NULL == predictor) { |
| 485 | 752 completion->Signal(); |
| 486 while (!work_queue_.IsEmpty() && | 753 return; |
| 487 pending_lookups_.size() < max_concurrent_dns_lookups_) { | 754 } |
| 488 const GURL url(work_queue_.Pop()); | 755 predictor->SaveDnsPrefetchStateForNextStartupAndTrim( |
| 489 UrlInfo* info = &results_[url]; | 756 startup_list, referral_list, completion); |
| 490 DCHECK(info->HasUrl(url)); | 757 } |
| 491 info->SetAssignedState(); | 758 |
| 492 | 759 void Predictor::SaveStateForNextStartupAndTrim(PrefService* prefs) { |
| 493 if (CongestionControlPerformed(info)) { | 760 if (!predictor_enabled_) |
| 494 DCHECK(work_queue_.IsEmpty()); | 761 return; |
| 495 return; | 762 |
| 763 base::WaitableEvent completion(true, false); | |
| 764 | |
| 765 ListPrefUpdate update_startup_list(prefs, prefs::kDnsPrefetchingStartupList); | |
| 766 ListPrefUpdate update_referral_list(prefs, | |
| 767 prefs::kDnsPrefetchingHostReferralList); | |
| 768 if (BrowserThread::CurrentlyOn(BrowserThread::IO)) { | |
| 769 SaveDnsPrefetchStateForNextStartupAndTrimOnIOThread( | |
| 770 update_startup_list.Get(), | |
| 771 update_referral_list.Get(), | |
| 772 &completion, | |
| 773 this); | |
| 774 } else { | |
| 775 bool posted = BrowserThread::PostTask( | |
| 776 BrowserThread::IO, | |
| 777 FROM_HERE, | |
| 778 NewRunnableFunction( | |
| 779 SaveDnsPrefetchStateForNextStartupAndTrimOnIOThread, | |
| 780 update_startup_list.Get(), | |
| 781 update_referral_list.Get(), | |
| 782 &completion, | |
| 783 this)); | |
| 784 | |
| 785 // TODO(jar): Synchronous waiting for the IO thread is a potential source | |
| 786 // to deadlocks and should be investigated. See http://crbug.com/78451. | |
| 787 DCHECK(posted); | |
| 788 if (posted) | |
| 789 completion.Wait(); | |
| 790 } | |
| 791 } | |
| 792 | |
| 793 void Predictor::SaveDnsPrefetchStateForNextStartupAndTrim( | |
| 794 base::ListValue* startup_list, | |
| 795 base::ListValue* referral_list, | |
| 796 base::WaitableEvent* completion) { | |
| 797 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 798 if (initial_observer_.get()) | |
| 799 initial_observer_->GetInitialDnsResolutionList(startup_list); | |
| 800 | |
| 801 // Do at least one trim at shutdown, in case the user wasn't running long | |
| 802 // enough to do any regular trimming of referrers. | |
| 803 TrimReferrersNow(); | |
| 804 SerializeReferrers(referral_list); | |
| 805 | |
| 806 completion->Signal(); | |
| 807 } | |
| 808 | |
| 809 void Predictor::EnablePredictor(bool enable) { | |
| 810 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI) || | |
| 811 BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 812 | |
| 813 if (BrowserThread::CurrentlyOn(BrowserThread::IO)) { | |
| 814 EnablePredictorOnIOThread(enable); | |
| 815 } else { | |
| 816 BrowserThread::PostTask( | |
| 817 BrowserThread::IO, | |
| 818 FROM_HERE, | |
| 819 NewRunnableMethod(this, | |
| 820 &Predictor::EnablePredictorOnIOThread, | |
| 821 enable)); | |
| 822 } | |
| 823 | |
| 824 } | |
| 825 | |
| 826 void Predictor::EnablePredictorOnIOThread(bool enable) { | |
| 827 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 828 predictor_enabled_ = enable; | |
| 829 } | |
| 830 | |
| 831 enum SubresourceValue { | |
| 832 PRECONNECTION, | |
| 833 PRERESOLUTION, | |
| 834 TOO_NEW, | |
| 835 SUBRESOURCE_VALUE_MAX | |
| 836 }; | |
| 837 | |
| 838 void Predictor::PrepareFrameSubresources(const GURL& url) { | |
| 839 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 840 DCHECK_EQ(url.GetWithEmptyPath(), url); | |
| 841 Referrers::iterator it = referrers_.find(url); | |
| 842 if (referrers_.end() == it) { | |
| 843 // Only when we don't know anything about this url, make 2 connections | |
| 844 // available. We could do this completely via learning (by prepopulating | |
| 845 // the referrer_ list with this expected value), but it would swell the | |
| 846 // size of the list with all the "Leaf" nodes in the tree (nodes that don't | |
| 847 // load any subresources). If we learn about this resource, we will instead | |
| 848 // provide a more carefully estimated preconnection count. | |
| 849 if (preconnect_enabled_) | |
| 850 PreconnectOnIOThread(url, UrlInfo::SELF_REFERAL_MOTIVATED, 2); | |
| 851 return; | |
| 852 } | |
| 853 | |
| 854 Referrer* referrer = &(it->second); | |
| 855 referrer->IncrementUseCount(); | |
| 856 const UrlInfo::ResolutionMotivation motivation = | |
| 857 UrlInfo::LEARNED_REFERAL_MOTIVATED; | |
| 858 for (Referrer::iterator future_url = referrer->begin(); | |
| 859 future_url != referrer->end(); ++future_url) { | |
| 860 SubresourceValue evalution(TOO_NEW); | |
| 861 double connection_expectation = future_url->second.subresource_use_rate(); | |
| 862 UMA_HISTOGRAM_CUSTOM_COUNTS("Net.PreconnectSubresourceExpectation", | |
| 863 static_cast<int>(connection_expectation * 100), | |
| 864 10, 5000, 50); | |
| 865 future_url->second.ReferrerWasObserved(); | |
| 866 if (preconnect_enabled_ && | |
| 867 connection_expectation > kPreconnectWorthyExpectedValue) { | |
| 868 evalution = PRECONNECTION; | |
| 869 future_url->second.IncrementPreconnectionCount(); | |
| 870 int count = static_cast<int>(std::ceil(connection_expectation)); | |
| 871 if (url.host() == future_url->first.host()) | |
| 872 ++count; | |
| 873 PreconnectOnIOThread(future_url->first, motivation, count); | |
| 874 } else if (connection_expectation > kDNSPreresolutionWorthyExpectedValue) { | |
| 875 evalution = PRERESOLUTION; | |
| 876 future_url->second.preresolution_increment(); | |
| 877 UrlInfo* queued_info = AppendToResolutionQueue(future_url->first, | |
| 878 motivation); | |
| 879 if (queued_info) | |
| 880 queued_info->SetReferringHostname(url); | |
| 496 } | 881 } |
| 497 | 882 UMA_HISTOGRAM_ENUMERATION("Net.PreconnectSubresourceEval", evalution, |
| 498 LookupRequest* request = new LookupRequest(this, host_resolver_, url); | 883 SUBRESOURCE_VALUE_MAX); |
| 499 int status = request->Start(); | 884 } |
| 500 if (status == net::ERR_IO_PENDING) { | |
| 501 // Will complete asynchronously. | |
| 502 pending_lookups_.insert(request); | |
| 503 peak_pending_lookups_ = std::max(peak_pending_lookups_, | |
| 504 pending_lookups_.size()); | |
| 505 } else { | |
| 506 // Completed synchronously (was already cached by HostResolver), or else | |
| 507 // there was (equivalently) some network error that prevents us from | |
| 508 // finding the name. Status net::OK means it was "found." | |
| 509 LookupFinished(request, url, status == net::OK); | |
| 510 delete request; | |
| 511 } | |
| 512 } | |
| 513 } | |
| 514 | |
| 515 bool Predictor::CongestionControlPerformed(UrlInfo* info) { | |
| 516 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 517 // Note: queue_duration is ONLY valid after we go to assigned state. | |
| 518 if (info->queue_duration() < max_dns_queue_delay_) | |
| 519 return false; | |
| 520 // We need to discard all entries in our queue, as we're keeping them waiting | |
| 521 // too long. By doing this, we'll have a chance to quickly service urgent | |
| 522 // resolutions, and not have a bogged down system. | |
| 523 while (true) { | |
| 524 info->RemoveFromQueue(); | |
| 525 if (work_queue_.IsEmpty()) | |
| 526 break; | |
| 527 info = &results_[work_queue_.Pop()]; | |
| 528 info->SetAssignedState(); | |
| 529 } | |
| 530 return true; | |
| 531 } | 885 } |
| 532 | 886 |
| 533 void Predictor::OnLookupFinished(LookupRequest* request, const GURL& url, | 887 void Predictor::OnLookupFinished(LookupRequest* request, const GURL& url, |
| 534 bool found) { | 888 bool found) { |
| 535 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 889 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 536 | 890 |
| 537 LookupFinished(request, url, found); | 891 LookupFinished(request, url, found); |
| 538 pending_lookups_.erase(request); | 892 pending_lookups_.erase(request); |
| 539 delete request; | 893 delete request; |
| 540 | 894 |
| 541 StartSomeQueuedResolutions(); | 895 StartSomeQueuedResolutions(); |
| 542 } | 896 } |
| 543 | 897 |
| 544 void Predictor::LookupFinished(LookupRequest* request, const GURL& url, | 898 void Predictor::LookupFinished(LookupRequest* request, const GURL& url, |
| 545 bool found) { | 899 bool found) { |
| 546 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 900 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 547 UrlInfo* info = &results_[url]; | 901 UrlInfo* info = &results_[url]; |
| 548 DCHECK(info->HasUrl(url)); | 902 DCHECK(info->HasUrl(url)); |
| 549 if (info->is_marked_to_delete()) { | 903 if (info->is_marked_to_delete()) { |
| 550 results_.erase(url); | 904 results_.erase(url); |
| 551 } else { | 905 } else { |
| 552 if (found) | 906 if (found) |
| 553 info->SetFoundState(); | 907 info->SetFoundState(); |
| 554 else | 908 else |
| 555 info->SetNoSuchNameState(); | 909 info->SetNoSuchNameState(); |
| 556 } | 910 } |
| 557 } | 911 } |
| 558 | 912 |
| 559 void Predictor::DiscardAllResults() { | 913 UrlInfo* Predictor::AppendToResolutionQueue( |
| 914 const GURL& url, | |
| 915 UrlInfo::ResolutionMotivation motivation) { | |
| 560 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 916 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 561 // Delete anything listed so far in this session that shows in about:dns. | 917 DCHECK(url.has_host()); |
| 562 referrers_.clear(); | |
| 563 | 918 |
| 919 if (shutdown_) | |
| 920 return NULL; | |
| 564 | 921 |
| 565 // Try to delete anything in our work queue. | 922 UrlInfo* info = &results_[url]; |
| 566 while (!work_queue_.IsEmpty()) { | 923 info->SetUrl(url); // Initialize or DCHECK. |
| 567 // Emulate processing cycle as though host was not found. | 924 // TODO(jar): I need to discard names that have long since expired. |
| 568 GURL url = work_queue_.Pop(); | 925 // Currently we only add to the domain map :-/ |
| 926 | |
| 927 DCHECK(info->HasUrl(url)); | |
| 928 | |
| 929 if (!info->NeedsDnsUpdate()) { | |
| 930 info->DLogResultsStats("DNS PrefetchNotUpdated"); | |
| 931 return NULL; | |
| 932 } | |
| 933 | |
| 934 info->SetQueuedState(motivation); | |
| 935 work_queue_.Push(url, motivation); | |
| 936 StartSomeQueuedResolutions(); | |
| 937 return info; | |
| 938 } | |
| 939 | |
| 940 bool Predictor::CongestionControlPerformed(UrlInfo* info) { | |
| 941 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 942 // Note: queue_duration is ONLY valid after we go to assigned state. | |
| 943 if (info->queue_duration() < max_dns_queue_delay_) | |
| 944 return false; | |
| 945 // We need to discard all entries in our queue, as we're keeping them waiting | |
| 946 // too long. By doing this, we'll have a chance to quickly service urgent | |
| 947 // resolutions, and not have a bogged down system. | |
| 948 while (true) { | |
| 949 info->RemoveFromQueue(); | |
| 950 if (work_queue_.IsEmpty()) | |
| 951 break; | |
| 952 info = &results_[work_queue_.Pop()]; | |
| 953 info->SetAssignedState(); | |
| 954 } | |
| 955 return true; | |
| 956 } | |
| 957 | |
| 958 void Predictor::StartSomeQueuedResolutions() { | |
| 959 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 960 | |
| 961 while (!work_queue_.IsEmpty() && | |
| 962 pending_lookups_.size() < max_concurrent_dns_lookups_) { | |
| 963 const GURL url(work_queue_.Pop()); | |
| 569 UrlInfo* info = &results_[url]; | 964 UrlInfo* info = &results_[url]; |
| 570 DCHECK(info->HasUrl(url)); | 965 DCHECK(info->HasUrl(url)); |
| 571 info->SetAssignedState(); | 966 info->SetAssignedState(); |
| 572 info->SetNoSuchNameState(); | |
| 573 } | |
| 574 // Now every result_ is either resolved, or is being resolved | |
| 575 // (see LookupRequest). | |
| 576 | 967 |
| 577 // Step through result_, recording names of all hosts that can't be erased. | 968 if (CongestionControlPerformed(info)) { |
| 578 // We can't erase anything being worked on. | 969 DCHECK(work_queue_.IsEmpty()); |
| 579 Results assignees; | 970 return; |
| 580 for (Results::iterator it = results_.begin(); results_.end() != it; ++it) { | |
| 581 GURL url(it->first); | |
| 582 UrlInfo* info = &it->second; | |
| 583 DCHECK(info->HasUrl(url)); | |
| 584 if (info->is_assigned()) { | |
| 585 info->SetPendingDeleteState(); | |
| 586 assignees[url] = *info; | |
| 587 } | 971 } |
| 588 } | |
| 589 DCHECK(assignees.size() <= max_concurrent_dns_lookups_); | |
| 590 results_.clear(); | |
| 591 // Put back in the names being worked on. | |
| 592 for (Results::iterator it = assignees.begin(); assignees.end() != it; ++it) { | |
| 593 DCHECK(it->second.is_marked_to_delete()); | |
| 594 results_[it->first] = it->second; | |
| 595 } | |
| 596 } | |
| 597 | 972 |
| 598 void Predictor::TrimReferrersNow() { | 973 LookupRequest* request = new LookupRequest(this, host_resolver_, url); |
| 599 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 974 int status = request->Start(); |
| 600 // Just finish up work if an incremental trim is in progress. | 975 if (status == net::ERR_IO_PENDING) { |
| 601 if (urls_being_trimmed_.empty()) | 976 // Will complete asynchronously. |
| 602 LoadUrlsForTrimming(); | 977 pending_lookups_.insert(request); |
| 603 IncrementalTrimReferrers(true); // Do everything now. | 978 peak_pending_lookups_ = std::max(peak_pending_lookups_, |
| 604 } | 979 pending_lookups_.size()); |
| 605 | 980 } else { |
| 606 void Predictor::SerializeReferrers(ListValue* referral_list) { | 981 // Completed synchronously (was already cached by HostResolver), or else |
| 607 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 982 // there was (equivalently) some network error that prevents us from |
| 608 referral_list->Clear(); | 983 // finding the name. Status net::OK means it was "found." |
| 609 referral_list->Append(new FundamentalValue(PREDICTOR_REFERRER_VERSION)); | 984 LookupFinished(request, url, status == net::OK); |
| 610 for (Referrers::const_iterator it = referrers_.begin(); | 985 delete request; |
| 611 it != referrers_.end(); ++it) { | |
| 612 // Serialize the list of subresource names. | |
| 613 Value* subresource_list(it->second.Serialize()); | |
| 614 | |
| 615 // Create a list for each referer. | |
| 616 ListValue* motivator(new ListValue); | |
| 617 motivator->Append(new StringValue(it->first.spec())); | |
| 618 motivator->Append(subresource_list); | |
| 619 | |
| 620 referral_list->Append(motivator); | |
| 621 } | |
| 622 } | |
| 623 | |
| 624 void Predictor::DeserializeReferrers(const ListValue& referral_list) { | |
| 625 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 626 int format_version = -1; | |
| 627 if (referral_list.GetSize() > 0 && | |
| 628 referral_list.GetInteger(0, &format_version) && | |
| 629 format_version == PREDICTOR_REFERRER_VERSION) { | |
| 630 for (size_t i = 1; i < referral_list.GetSize(); ++i) { | |
| 631 ListValue* motivator; | |
| 632 if (!referral_list.GetList(i, &motivator)) { | |
| 633 NOTREACHED(); | |
| 634 return; | |
| 635 } | |
| 636 std::string motivating_url_spec; | |
| 637 if (!motivator->GetString(0, &motivating_url_spec)) { | |
| 638 NOTREACHED(); | |
| 639 return; | |
| 640 } | |
| 641 | |
| 642 Value* subresource_list; | |
| 643 if (!motivator->Get(1, &subresource_list)) { | |
| 644 NOTREACHED(); | |
| 645 return; | |
| 646 } | |
| 647 | |
| 648 referrers_[GURL(motivating_url_spec)].Deserialize(*subresource_list); | |
| 649 } | 986 } |
| 650 } | 987 } |
| 651 } | 988 } |
| 652 | 989 |
| 653 void Predictor::TrimReferrers() { | 990 void Predictor::TrimReferrers() { |
| 654 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 991 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 655 if (!urls_being_trimmed_.empty()) | 992 if (!urls_being_trimmed_.empty()) |
| 656 return; // There is incremental trimming in progress already. | 993 return; // There is incremental trimming in progress already. |
| 657 | 994 |
| 658 // Check to see if it is time to trim yet. | 995 // Check to see if it is time to trim yet. |
| (...skipping 12 matching lines...) Expand all Loading... | |
| 671 it != referrers_.end(); ++it) | 1008 it != referrers_.end(); ++it) |
| 672 urls_being_trimmed_.push_back(it->first); | 1009 urls_being_trimmed_.push_back(it->first); |
| 673 UMA_HISTOGRAM_COUNTS("Net.PredictionTrimSize", urls_being_trimmed_.size()); | 1010 UMA_HISTOGRAM_COUNTS("Net.PredictionTrimSize", urls_being_trimmed_.size()); |
| 674 } | 1011 } |
| 675 | 1012 |
| 676 void Predictor::PostIncrementalTrimTask() { | 1013 void Predictor::PostIncrementalTrimTask() { |
| 677 if (urls_being_trimmed_.empty()) | 1014 if (urls_being_trimmed_.empty()) |
| 678 return; | 1015 return; |
| 679 MessageLoop::current()->PostDelayedTask( | 1016 MessageLoop::current()->PostDelayedTask( |
| 680 FROM_HERE, | 1017 FROM_HERE, |
| 681 trim_task_factory_.NewRunnableMethod(&Predictor::IncrementalTrimReferrers, | 1018 trim_task_factory_->NewRunnableMethod( |
| 682 false), | 1019 &Predictor::IncrementalTrimReferrers, false), |
| 683 kDurationBetweenTrimmingIncrements.InMilliseconds()); | 1020 kDurationBetweenTrimmingIncrements.InMilliseconds()); |
| 684 } | 1021 } |
| 685 | 1022 |
| 686 void Predictor::IncrementalTrimReferrers(bool trim_all_now) { | 1023 void Predictor::IncrementalTrimReferrers(bool trim_all_now) { |
| 687 size_t trim_count = urls_being_trimmed_.size(); | 1024 size_t trim_count = urls_being_trimmed_.size(); |
| 688 if (!trim_all_now) | 1025 if (!trim_all_now) |
| 689 trim_count = std::min(trim_count, kUrlsTrimmedPerIncrement); | 1026 trim_count = std::min(trim_count, kUrlsTrimmedPerIncrement); |
| 690 while (trim_count-- != 0) { | 1027 while (trim_count-- != 0) { |
| 691 Referrers::iterator it = referrers_.find(urls_being_trimmed_.back()); | 1028 Referrers::iterator it = referrers_.find(urls_being_trimmed_.back()); |
| 692 urls_being_trimmed_.pop_back(); | 1029 urls_being_trimmed_.pop_back(); |
| 693 if (it == referrers_.end()) | 1030 if (it == referrers_.end()) |
| 694 continue; // Defensive code: It got trimmed away already. | 1031 continue; // Defensive code: It got trimmed away already. |
| 695 if (!it->second.Trim(kReferrerTrimRatio, kDiscardableExpectedValue)) | 1032 if (!it->second.Trim(kReferrerTrimRatio, kDiscardableExpectedValue)) |
| 696 referrers_.erase(it); | 1033 referrers_.erase(it); |
| 697 } | 1034 } |
| 698 PostIncrementalTrimTask(); | 1035 PostIncrementalTrimTask(); |
| 699 } | 1036 } |
| 700 | 1037 |
| 701 //------------------------------------------------------------------------------ | 1038 // ---------------------- End UI methods. ------------------------------------- |
| 1039 | |
| 1040 //----------------------------------------------------------------------------- | |
| 702 | 1041 |
| 703 Predictor::HostNameQueue::HostNameQueue() { | 1042 Predictor::HostNameQueue::HostNameQueue() { |
| 704 } | 1043 } |
| 705 | 1044 |
| 706 Predictor::HostNameQueue::~HostNameQueue() { | 1045 Predictor::HostNameQueue::~HostNameQueue() { |
| 707 } | 1046 } |
| 708 | 1047 |
| 709 void Predictor::HostNameQueue::Push(const GURL& url, | 1048 void Predictor::HostNameQueue::Push(const GURL& url, |
| 710 UrlInfo::ResolutionMotivation motivation) { | 1049 UrlInfo::ResolutionMotivation motivation) { |
| 711 switch (motivation) { | 1050 switch (motivation) { |
| (...skipping 15 matching lines...) Expand all Loading... | |
| 727 | 1066 |
| 728 GURL Predictor::HostNameQueue::Pop() { | 1067 GURL Predictor::HostNameQueue::Pop() { |
| 729 DCHECK(!IsEmpty()); | 1068 DCHECK(!IsEmpty()); |
| 730 std::queue<GURL> *queue(rush_queue_.empty() ? &background_queue_ | 1069 std::queue<GURL> *queue(rush_queue_.empty() ? &background_queue_ |
| 731 : &rush_queue_); | 1070 : &rush_queue_); |
| 732 GURL url(queue->front()); | 1071 GURL url(queue->front()); |
| 733 queue->pop(); | 1072 queue->pop(); |
| 734 return url; | 1073 return url; |
| 735 } | 1074 } |
| 736 | 1075 |
| 737 void Predictor::DeserializeReferrersThenDelete(ListValue* referral_list) { | 1076 //----------------------------------------------------------------------------- |
| 738 DeserializeReferrers(*referral_list); | 1077 // Member definitions for InitialObserver class. |
| 739 delete referral_list; | 1078 |
| 1079 void Predictor::InitialObserver::Append(const GURL& url, | |
| 1080 Predictor* predictor) { | |
| 1081 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 1082 | |
| 1083 // TODO(rlp): Do we really need the predictor check here? | |
| 1084 if (NULL == predictor) | |
| 1085 return; | |
| 1086 if (kStartupResolutionCount <= first_navigations_.size()) | |
| 1087 return; | |
| 1088 | |
| 1089 DCHECK(url.SchemeIs("http") || url.SchemeIs("https")); | |
| 1090 DCHECK_EQ(url, Predictor::CanonicalizeUrl(url)); | |
| 1091 if (first_navigations_.find(url) == first_navigations_.end()) | |
| 1092 first_navigations_[url] = base::TimeTicks::Now(); | |
| 740 } | 1093 } |
| 741 | 1094 |
| 1095 void Predictor::InitialObserver::GetInitialDnsResolutionList( | |
| 1096 base::ListValue* startup_list) { | |
| 1097 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 1098 DCHECK(startup_list); | |
| 1099 startup_list->Clear(); | |
| 1100 DCHECK_EQ(0u, startup_list->GetSize()); | |
| 1101 startup_list->Append(new FundamentalValue(kPredictorStartupFormatVersion)); | |
| 1102 for (FirstNavigations::iterator it = first_navigations_.begin(); | |
| 1103 it != first_navigations_.end(); | |
| 1104 ++it) { | |
| 1105 DCHECK(it->first == Predictor::CanonicalizeUrl(it->first)); | |
| 1106 startup_list->Append(new StringValue(it->first.spec())); | |
| 1107 } | |
| 1108 } | |
| 742 | 1109 |
| 743 //------------------------------------------------------------------------------ | 1110 void Predictor::InitialObserver::GetFirstResolutionsHtml( |
| 1111 std::string* output) { | |
| 1112 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
| 1113 | |
| 1114 UrlInfo::UrlInfoTable resolution_list; | |
| 1115 { | |
| 1116 for (FirstNavigations::iterator it(first_navigations_.begin()); | |
| 1117 it != first_navigations_.end(); | |
| 1118 it++) { | |
| 1119 UrlInfo info; | |
| 1120 info.SetUrl(it->first); | |
| 1121 info.set_time(it->second); | |
| 1122 resolution_list.push_back(info); | |
| 1123 } | |
| 1124 } | |
| 1125 UrlInfo::GetHtmlTable(resolution_list, | |
| 1126 "Future startups will prefetch DNS records for ", false, output); | |
| 1127 } | |
| 1128 | |
| 1129 //----------------------------------------------------------------------------- | |
| 744 // Helper functions | 1130 // Helper functions |
| 745 //------------------------------------------------------------------------------ | 1131 //----------------------------------------------------------------------------- |
| 746 | 1132 |
| 747 // static | 1133 // static |
| 748 GURL Predictor::CanonicalizeUrl(const GURL& url) { | 1134 GURL Predictor::CanonicalizeUrl(const GURL& url) { |
| 749 if (!url.has_host()) | 1135 if (!url.has_host()) |
| 750 return GURL::EmptyGURL(); | 1136 return GURL::EmptyGURL(); |
| 751 | 1137 |
| 752 std::string scheme; | 1138 std::string scheme; |
| 753 if (url.has_scheme()) { | 1139 if (url.has_scheme()) { |
| 754 scheme = url.scheme(); | 1140 scheme = url.scheme(); |
| 755 if (scheme != "http" && scheme != "https") | 1141 if (scheme != "http" && scheme != "https") |
| 756 return GURL::EmptyGURL(); | 1142 return GURL::EmptyGURL(); |
| 757 if (url.has_port()) | 1143 if (url.has_port()) |
| 758 return url.GetWithEmptyPath(); | 1144 return url.GetWithEmptyPath(); |
| 759 } else { | 1145 } else { |
| 760 scheme = "http"; | 1146 scheme = "http"; |
| 761 } | 1147 } |
| 762 | 1148 |
| 763 // If we omit a port, it will default to 80 or 443 as appropriate. | 1149 // If we omit a port, it will default to 80 or 443 as appropriate. |
| 764 std::string colon_plus_port; | 1150 std::string colon_plus_port; |
| 765 if (url.has_port()) | 1151 if (url.has_port()) |
| 766 colon_plus_port = ":" + url.port(); | 1152 colon_plus_port = ":" + url.port(); |
| 767 | 1153 |
| 768 return GURL(scheme + "://" + url.host() + colon_plus_port); | 1154 return GURL(scheme + "://" + url.host() + colon_plus_port); |
| 769 } | 1155 } |
| 770 | 1156 |
| 771 | |
| 772 } // namespace chrome_browser_net | 1157 } // namespace chrome_browser_net |
| OLD | NEW |