Chromium Code Reviews

Unified Diff: net/base/host_resolver_impl.cc

Issue 9369045: [net] HostResolverImpl + DnsTransaction + DnsConfigService = Asynchronous DNS ready for experiments. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Rebased to master. Created 8 years, 10 months ago
Index: net/base/host_resolver_impl.cc
diff --git a/net/base/host_resolver_impl.cc b/net/base/host_resolver_impl.cc
index d9330e213cebb1405002629e2fda5e6042329fac..b2bfecfe815cacebff95ac14b3ed04454059ea16 100644
--- a/net/base/host_resolver_impl.cc
+++ b/net/base/host_resolver_impl.cc
@@ -24,12 +24,14 @@
#include "base/message_loop_proxy.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
+#include "base/rand_util.h"
#include "base/stl_util.h"
#include "base/string_util.h"
#include "base/threading/worker_pool.h"
#include "base/time.h"
#include "base/utf_string_conversions.h"
#include "base/values.h"
+#include "net/base/address_family.h"
#include "net/base/address_list.h"
#include "net/base/address_list_net_log_param.h"
#include "net/base/dns_reloader.h"
@@ -38,6 +40,12 @@
#include "net/base/net_errors.h"
#include "net/base/net_log.h"
#include "net/base/net_util.h"
+#include "net/dns/dns_config_service.h"
+#include "net/dns/dns_protocol.h"
+#include "net/dns/dns_response.h"
+#include "net/dns/dns_session.h"
+#include "net/dns/dns_transaction.h"
+#include "net/socket/client_socket_factory.h"
#if defined(OS_WIN)
#include "net/base/winsock_init.h"
@@ -54,6 +62,9 @@ const size_t kMaxHostLength = 4096;
// Default TTL for successful resolutions with ProcTask.
const unsigned kCacheEntryTTLSeconds = 60;
+// Default TTL for unsuccessful resolutions with ProcTask.
+const unsigned kNegativeCacheEntryTTLSeconds = 0;
+
// Maximum of 8 concurrent resolver threads (excluding retries).
// Some routers (or resolvers) appear to start to provide host-not-found if
// too many simultaneous resolutions are pending. This number needs to be
@@ -155,11 +166,9 @@ class CallSystemHostResolverProc : public HostResolverProc {
};
// Extra parameters to attach to the NetLog when the resolve failed.
-class HostResolveFailedParams : public NetLog::EventParameters {
+class ProcTaskFailedParams : public NetLog::EventParameters {
public:
- HostResolveFailedParams(uint32 attempt_number,
- int net_error,
- int os_error)
+ ProcTaskFailedParams(uint32 attempt_number, int net_error, int os_error)
: attempt_number_(attempt_number),
net_error_(net_error),
os_error_(os_error) {
@@ -201,6 +210,26 @@ class HostResolveFailedParams : public NetLog::EventParameters {
const int os_error_;
};
+// Extra parameters to attach to the NetLog when the DnsTask failed.
+class DnsTaskFailedParams : public NetLog::EventParameters {
+ public:
+ DnsTaskFailedParams(int net_error, int dns_error)
+ : net_error_(net_error), dns_error_(dns_error) {
+ }
+
+ virtual Value* ToValue() const OVERRIDE {
+ DictionaryValue* dict = new DictionaryValue();
+ dict->SetInteger("net_error", net_error_);
+ if (dns_error_)
+ dict->SetInteger("dns_error", dns_error_);
+ return dict;
+ }
+
+ private:
+ const int net_error_;
+ const int dns_error_;
+};
+
// Parameters representing the information in a RequestInfo object, along with
// the associated NetLog::Source.
class RequestInfoParameters : public NetLog::EventParameters {
@@ -229,8 +258,7 @@ class RequestInfoParameters : public NetLog::EventParameters {
const NetLog::Source source_;
};
-// Parameters associated with the creation of a HostResolverImpl::Job
-// or a HostResolverImpl::ProcTask.
+// Parameters associated with the creation of a HostResolverImpl::Job.
class JobCreationParameters : public NetLog::EventParameters {
public:
JobCreationParameters(const std::string& host,
@@ -294,7 +322,7 @@ void LogFinishRequest(const BoundNetLog& source_net_log,
int os_error) {
scoped_refptr<NetLog::EventParameters> params;
if (net_error != OK) {
- params = new HostResolveFailedParams(0, net_error, os_error);
+ params = new ProcTaskFailedParams(0, net_error, os_error);
}
request_net_log.EndEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_REQUEST, params);
@@ -315,8 +343,8 @@ void LogCancelRequest(const BoundNetLog& source_net_log,
// Keeps track of the highest priority.
class PriorityTracker {
public:
- PriorityTracker()
- : highest_priority_(IDLE), total_count_(0) {
+ explicit PriorityTracker(RequestPriority priority)
+ : highest_priority_(priority), total_count_(0) {
memset(counts_, 0, sizeof(counts_));
}
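
The constructor change above lets a Job start its PriorityTracker at the first request's priority instead of IDLE. As a rough, self-contained illustration of what such a tracker does (a count per level plus a cached maximum), here is a simplified sketch; the enum and class below are invented stand-ins, and the real net::RequestPriority uses a different numbering.

#include <cstddef>

// Simplified stand-in for the priority enum; here a larger value means a more
// urgent request (the real net::RequestPriority numbering differs).
enum Priority { IDLE = 0, LOWEST, LOW, MEDIUM, HIGHEST, NUM_PRIORITIES };

class SimplePriorityTracker {
 public:
  explicit SimplePriorityTracker(Priority initial)
      : highest_(initial), total_(0) {
    for (std::size_t i = 0; i < NUM_PRIORITIES; ++i)
      counts_[i] = 0;
  }

  Priority highest_priority() const { return highest_; }
  std::size_t total_count() const { return total_; }

  void Add(Priority priority) {
    ++total_;
    ++counts_[priority];
    if (priority > highest_)
      highest_ = priority;  // A more urgent request raises the tracked maximum.
  }

  void Remove(Priority priority) {
    --total_;
    --counts_[priority];
    // The maximum only decays once no request remains at that level.
    while (highest_ > IDLE && counts_[highest_] == 0)
      highest_ = static_cast<Priority>(highest_ - 1);
  }

 private:
  Priority highest_;
  std::size_t total_;
  std::size_t counts_[NUM_PRIORITIES];
};
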
@@ -359,9 +387,50 @@ class PriorityTracker {
//-----------------------------------------------------------------------------
+// Convenience wrapper for PrioritizedDispatcher::Handle so that we don't forget
+// to update it when cancelling or changing the priority.
mmenke 2012/02/15 19:54:16 nit: canceling.
szym 2012/02/15 20:28:05 I think the consensus was that it's okay to use no
+// TODO(szym): consider making PrioritizedDispatcher::Handle into this.
+class DispatcherHandle {
cbentzel 2012/02/15 19:48:35 Is this needed as much anymore, since you're restr
szym 2012/02/15 20:28:05 I prefer this over having to remember to set handl
+ public:
+ DispatcherHandle() : dispatcher_(NULL) {}
+ DispatcherHandle(PrioritizedDispatcher* dispatcher,
+ const PrioritizedDispatcher::Handle& handle)
+ : dispatcher_(dispatcher), handle_(handle) {
+ if (handle_.is_null())
+ dispatcher_ = NULL;
+ }
+
+ bool is_null() const {
+ return dispatcher_ == NULL;
+ }
+
+ void Reset() {
+ dispatcher_ = NULL;
+ handle_ = PrioritizedDispatcher::Handle();
+ }
+
+ void ChangePriority(RequestPriority priority) {
+ DCHECK(!is_null());
+ handle_ = dispatcher_->ChangePriority(handle_, priority);
+ }
+
+ void Cancel() {
+ DCHECK(!is_null());
+ dispatcher_->Cancel(handle_);
+ Reset();
+ }
+
+ private:
+ PrioritizedDispatcher* dispatcher_;
+ PrioritizedDispatcher::Handle handle_;
+};
+
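
DispatcherHandle above pairs the PrioritizedDispatcher pointer with its Handle so that cancelling or re-prioritizing a queued Job cannot be done against a stale or missing handle. A standalone sketch of the same "owner plus handle" wrapper idea follows; Queue and QueueHandle are hypothetical placeholders, not the real PrioritizedDispatcher API.

#include <cassert>
#include <cstddef>
#include <list>
#include <string>

// Hypothetical work queue whose Add() hands back an opaque handle.
class Queue {
 public:
  typedef std::list<std::string>::iterator Handle;
  Handle Add(const std::string& item) {
    return items_.insert(items_.end(), item);
  }
  void Cancel(Handle handle) { items_.erase(handle); }

 private:
  std::list<std::string> items_;
};

// Bundles the queue pointer with the handle so a caller cannot cancel against
// the wrong queue or forget to clear the handle afterwards.
class QueueHandle {
 public:
  QueueHandle() : queue_(NULL) {}
  QueueHandle(Queue* queue, Queue::Handle handle)
      : queue_(queue), handle_(handle) {}

  bool is_null() const { return queue_ == NULL; }

  void Reset() { queue_ = NULL; }

  void Cancel() {
    assert(!is_null());
    queue_->Cancel(handle_);
    Reset();  // A double cancel now trips the assert instead of corrupting state.
  }

 private:
  Queue* queue_;
  Queue::Handle handle_;
};

The patch's version layers ChangePriority() on top of the same pattern.
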
+//-----------------------------------------------------------------------------
+
HostResolver* CreateHostResolver(size_t max_concurrent_resolves,
size_t max_retry_attempts,
- bool use_cache,
+ HostCache* cache,
+ scoped_ptr<DnsConfigService> config_service,
NetLog* net_log) {
if (max_concurrent_resolves == HostResolver::kDefaultParallelism)
max_concurrent_resolves = kDefaultMaxProcTasks;
@@ -372,9 +441,10 @@ HostResolver* CreateHostResolver(size_t max_concurrent_resolves,
PrioritizedDispatcher::Limits limits(NUM_PRIORITIES, max_concurrent_resolves);
HostResolverImpl* resolver = new HostResolverImpl(
- use_cache ? HostCache::CreateDefaultCache() : NULL,
+ cache,
limits,
HostResolverImpl::ProcTaskParams(NULL, max_retry_attempts),
+ config_service.Pass(),
net_log);
return resolver;
@@ -389,7 +459,8 @@ HostResolver* CreateSystemHostResolver(size_t max_concurrent_resolves,
NetLog* net_log) {
return CreateHostResolver(max_concurrent_resolves,
max_retry_attempts,
- true /* use_cache */,
+ HostCache::CreateDefaultCache(),
+ scoped_ptr<DnsConfigService>(NULL),
net_log);
}
@@ -398,7 +469,21 @@ HostResolver* CreateNonCachingSystemHostResolver(size_t max_concurrent_resolves,
NetLog* net_log) {
return CreateHostResolver(max_concurrent_resolves,
max_retry_attempts,
- false /* use_cache */,
+ NULL,
+ scoped_ptr<DnsConfigService>(NULL),
+ net_log);
+}
+
+HostResolver* CreateAsyncHostResolver(size_t max_concurrent_resolves,
+ size_t max_retry_attempts,
+ NetLog* net_log) {
+ scoped_ptr<DnsConfigService> config_service =
+ DnsConfigService::CreateSystemService();
+ config_service->Watch();
cbentzel 2012/02/15 19:48:35 Should Watch be called by HostResolverImpl, so it
szym 2012/02/15 20:28:05 I'll move it although it doesn't matter: DnsConfig
+ return CreateHostResolver(max_concurrent_resolves,
+ max_retry_attempts,
+ HostCache::CreateDefaultCache(),
+ config_service.Pass(),
net_log);
}
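
CreateAsyncHostResolver above builds the DnsConfigService, starts it watching the system settings, and hands ownership to the resolver with scoped_ptr::Pass(). The sketch below models that handoff with std::unique_ptr and std::move (the post-C++11 equivalent of Pass()); ConfigService, Resolver, and CreateAsyncResolver here are invented placeholders, not the Chromium classes.

#include <memory>
#include <utility>

// Placeholder for a service that watches system DNS configuration.
struct ConfigService {
  void Watch() { /* begin monitoring system DNS settings */ }
};

// Placeholder resolver that takes sole ownership of the service; a null
// service would mean "no asynchronous DNS".
class Resolver {
 public:
  explicit Resolver(std::unique_ptr<ConfigService> service)
      : service_(std::move(service)) {}

 private:
  std::unique_ptr<ConfigService> service_;
};

std::unique_ptr<Resolver> CreateAsyncResolver() {
  std::unique_ptr<ConfigService> service(new ConfigService);
  // The review above debates whether the resolver itself should call Watch().
  service->Watch();
  return std::unique_ptr<Resolver>(new Resolver(std::move(service)));
}
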
@@ -505,7 +590,9 @@ class HostResolverImpl::Request {
class HostResolverImpl::ProcTask
: public base::RefCountedThreadSafe<HostResolverImpl::ProcTask> {
public:
- typedef base::Callback<void(int, int, const AddressList&)> Callback;
+ typedef base::Callback<void(int net_error,
+ int os_error,
+ const AddressList& addr_list)> Callback;
ProcTask(const Key& key,
const ProcTaskParams& params,
@@ -519,22 +606,14 @@ class HostResolverImpl::ProcTask
completed_attempt_number_(0),
completed_attempt_error_(ERR_UNEXPECTED),
had_non_speculative_request_(false),
- net_log_(BoundNetLog::Make(
- job_net_log.net_log(),
- NetLog::SOURCE_HOST_RESOLVER_IMPL_PROC_TASK)) {
+ net_log_(job_net_log) {
if (!params_.resolver_proc)
params_.resolver_proc = HostResolverProc::GetDefault();
// If default is unset, use the system proc.
if (!params_.resolver_proc)
params_.resolver_proc = new CallSystemHostResolverProc();
- job_net_log.AddEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_CREATE_PROC_TASK,
- new NetLogSourceParameter("source_dependency",
- net_log_.source()));
-
- net_log_.BeginEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_PROC_TASK,
- new JobCreationParameters(key_.hostname,
- job_net_log.source()));
+ net_log_.BeginEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_PROC_TASK, NULL);
}
void Start() {
@@ -551,8 +630,6 @@ class HostResolverImpl::ProcTask
if (was_canceled())
return;
- net_log_.AddEvent(NetLog::TYPE_CANCELLED, NULL);
-
callback_.Reset();
net_log_.EndEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_PROC_TASK, NULL);
@@ -657,14 +734,13 @@ class HostResolverImpl::ProcTask
bool was_retry_attempt = attempt_number > 1;
// Ideally the following code would be part of host_resolver_proc.cc,
- // however it isn't safe to call NetworkChangeNotifier from worker
- // threads. So we do it here on the IO thread instead.
+ // however it isn't safe to call NetworkChangeNotifier from worker threads.
+ // So we do it here on the IO thread instead.
if (error != OK && NetworkChangeNotifier::IsOffline())
error = ERR_INTERNET_DISCONNECTED;
- // If this is the first attempt that is finishing later, then record
- // data for the first attempt. Won't contaminate with retry attempt's
- // data.
+ // If this is the first attempt that is finishing later, then record data
+ // for the first attempt. Won't contaminate with retry attempt's data.
if (!was_retry_attempt)
RecordPerformanceHistograms(start_time, error, os_error);
@@ -675,7 +751,7 @@ class HostResolverImpl::ProcTask
scoped_refptr<NetLog::EventParameters> params;
if (error != OK) {
- params = new HostResolveFailedParams(attempt_number, error, os_error);
+ params = new ProcTaskFailedParams(attempt_number, error, os_error);
} else {
params = new NetLogIntegerParameter("attempt_number", attempt_number);
}
@@ -696,7 +772,7 @@ class HostResolverImpl::ProcTask
}
if (error != OK) {
- params = new HostResolveFailedParams(0, error, os_error);
+ params = new ProcTaskFailedParams(0, error, os_error);
} else {
params = new AddressListNetLogParam(results_);
}
@@ -891,6 +967,8 @@ class HostResolverImpl::ProcTask
//-----------------------------------------------------------------------------
// Represents a request to the worker pool for a "probe for IPv6 support" call.
+//
+// TODO(szym): This could also be replaced with PostTaskAndReply and Callbacks.
class HostResolverImpl::IPv6ProbeJob
: public base::RefCountedThreadSafe<HostResolverImpl::IPv6ProbeJob> {
public:
@@ -958,17 +1036,88 @@ class HostResolverImpl::IPv6ProbeJob
//-----------------------------------------------------------------------------
+// Resolves the hostname using DnsTransaction.
+// TODO(szym): This could be moved to separate source file as well.
+class HostResolverImpl::DnsTask {
+ public:
+ typedef base::Callback<void(int net_error,
+ const AddressList& addr_list,
+ base::TimeDelta ttl)> Callback;
+
+ DnsTask(DnsTransactionFactory* factory,
+ const Key& key,
+ const Callback& callback,
+ const BoundNetLog& job_net_log)
+ : callback_(callback), net_log_(job_net_log) {
+ DCHECK(factory);
+ DCHECK(!callback.is_null());
+
+ // For now we treat ADDRESS_FAMILY_UNSPEC as if it was IPV4.
+ uint16 qtype = (key.address_family == ADDRESS_FAMILY_IPV6)
+ ? dns_protocol::kTypeAAAA
+ : dns_protocol::kTypeA;
+ // TODO(szym): Implement "happy eyeballs".
+ transaction_ = factory->CreateTransaction(
+ key.hostname,
+ qtype,
+ base::Bind(&DnsTask::OnTransactionComplete, base::Unretained(this)),
+ net_log_);
+ DCHECK(transaction_.get());
+ }
+
+ int Start() {
+ net_log_.BeginEvent(
+ NetLog::TYPE_HOST_RESOLVER_IMPL_DNS_TASK,
+ new NetLogSourceParameter("source_dependency",
+ transaction_->net_log().source()));
+ return transaction_->Start();
+ }
+
+ void OnTransactionComplete(DnsTransaction* transaction,
+ int net_error,
+ const DnsResponse* response) {
+ // TODO(szym): Record performance histograms.
+ DnsResponse::Result result = DnsResponse::DNS_SUCCESS;
mmenke 2012/02/15 19:54:16 Fine with this as-is, but does seem a little weird
szym 2012/02/15 20:28:05 It is equivalent to setting os_error = 0 if we don
+ if (net_error == OK) {
+ AddressList addr_list;
+ base::TimeDelta ttl;
+ result = response->ParseToAddressList(&addr_list, &ttl);
+ if (result == DnsResponse::DNS_SUCCESS) {
+ net_log_.EndEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_DNS_TASK,
+ new AddressListNetLogParam(addr_list));
+ callback_.Run(net_error, addr_list, ttl);
mmenke 2012/02/15 19:54:16 Suggest you add a comment that we've been deleted
+ return;
+ }
+ net_error = ERR_DNS_MALFORMED_RESPONSE;
+ }
+ callback_.Run(net_error, AddressList(), base::TimeDelta());
+ net_log_.EndEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_DNS_TASK,
mmenke 2012/02/15 19:54:16 Suggest you move this line up, and delete |this| i
szym 2012/02/15 20:28:05 Good catch. This is a bug.
+ new DnsTaskFailedParams(net_error, result));
+ }
+
+ private:
+ // The listener to the results of this DnsTask.
+ Callback callback_;
+
+ const BoundNetLog net_log_;
+
+ scoped_ptr<DnsTransaction> transaction_;
+};
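
mmenke's comments above point out that callback_.Run() may delete the DnsTask, so the EndEvent call that currently follows it touches freed memory (szym confirms it is a bug). The standalone sketch below shows the defensive ordering: finish all member work, optionally copy the callback to the stack, and only then run it. Task, Logger, and the names used are illustrative, not the patch's classes.

#include <cstdio>
#include <functional>
#include <memory>

struct Logger {
  void EndEvent(const char* what) { std::printf("end: %s\n", what); }
};

class Task {
 public:
  // |on_done| is allowed to destroy this Task, which is exactly what an owner
  // typically does in its completion callback.
  Task(Logger* log, std::function<void(int)> on_done)
      : log_(log), on_done_(std::move(on_done)) {}

  void Complete(int result) {
    // 1. Do all work that needs |this| while it is still guaranteed alive.
    log_->EndEvent("task");
    // 2. Copy the callback to the stack so invoking it stays valid even after
    //    the members of |this| are destroyed.
    std::function<void(int)> done = on_done_;
    done(result);
    // 3. Do not touch any member past this point: |this| may be gone.
  }

 private:
  Logger* log_;
  std::function<void(int)> on_done_;
};

int main() {
  Logger log;
  std::unique_ptr<Task> task;
  task.reset(new Task(&log, [&task](int result) {
    std::printf("result: %d\n", result);
    task.reset();  // The owner destroys the task from inside the callback.
  }));
  task->Complete(42);
  return 0;
}
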
+
+//-----------------------------------------------------------------------------
+
// Aggregates all Requests for the same Key. Dispatched via PriorityDispatch.
-// Spawns ProcTask when started.
class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
public:
// Creates new job for |key| where |request_net_log| is bound to the
- // request that spawned it.
+ // request that spawned it and |priority| is the initial priority.
Job(HostResolverImpl* resolver,
const Key& key,
- const BoundNetLog& request_net_log)
+ const BoundNetLog& request_net_log,
+ RequestPriority priority)
: resolver_(resolver->AsWeakPtr()),
key_(key),
+ priority_tracker_(priority),
had_non_speculative_request_(false),
net_log_(BoundNetLog::Make(request_net_log.net_log(),
NetLog::SOURCE_HOST_RESOLVER_IMPL_JOB)),
@@ -985,11 +1134,16 @@ class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
virtual ~Job() {
if (net_error_ == ERR_IO_PENDING) {
if (is_running()) {
+ // |resolver_| was destroyed with this Job still pending.
+ // Clean-up, record in the log, but don't run any callbacks.
DCHECK_EQ(ERR_IO_PENDING, net_error_);
- proc_task_->Cancel();
- proc_task_ = NULL;
+ if (is_proc_running()) {
+ proc_task_->Cancel();
+ proc_task_ = NULL;
+ }
net_error_ = ERR_ABORTED;
} else {
+ // The last Request for this Job has been cancelled.
net_log_.AddEvent(NetLog::TYPE_CANCELLED, NULL);
net_error_ = OK; // For NetLog.
}
@@ -1009,10 +1163,6 @@ class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
STLDeleteElements(&requests_);
}
- HostResolverImpl* resolver() const {
- return resolver_;
- }
-
RequestPriority priority() const {
return priority_tracker_.highest_priority();
}
@@ -1030,22 +1180,19 @@ class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
return net_error_;
}
- // Used by HostResolverImpl with |dispatcher_|.
- const PrioritizedDispatcher::Handle& handle() const {
- return handle_;
+ void AddToDispatcher(PrioritizedDispatcher* dispatcher) {
+ DCHECK(handle_.is_null());
+ handle_ = DispatcherHandle(dispatcher, dispatcher->Add(this, priority()));
}
- void set_handle(const PrioritizedDispatcher::Handle& handle) {
- handle_ = handle;
+ bool IsWaitingInDispatch() const {
+ return !handle_.is_null();
}
- // The Job will own |req| and destroy it in ~Job.
- void AddRequest(Request* req) {
+ void AddRequest(scoped_ptr<Request> req) {
DCHECK_EQ(key_.hostname, req->info().hostname());
req->set_job(this);
- requests_.push_back(req);
-
priority_tracker_.Add(req->info().priority());
req->request_net_log().AddEvent(
@@ -1064,6 +1211,11 @@ class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
if (proc_task_)
proc_task_->set_had_non_speculative_request();
}
+
+ requests_.push_back(req.release());
+
+ if (!handle_.is_null())
+ handle_.ChangePriority(priority());
}
void CancelRequest(Request* req) {
@@ -1078,44 +1230,76 @@ class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
NetLog::TYPE_HOST_RESOLVER_IMPL_JOB_REQUEST_DETACH,
make_scoped_refptr(new JobAttachParameters(
req->request_net_log().source(), priority())));
+
+ if (!handle_.is_null()) {
+ if (num_active_requests() > 0) {
+ handle_.ChangePriority(priority());
+ } else {
+ handle_.Cancel();
+ }
+ }
}
// Aborts and destroys the job, completes all requests as aborted.
void Abort() {
// Job should only be aborted if it's running.
DCHECK(is_running());
- proc_task_->Cancel();
- proc_task_ = NULL;
+ if (is_proc_running()) {
+ proc_task_->Cancel();
+ proc_task_ = NULL;
+ }
+ dns_task_.reset();
net_error_ = ERR_ABORTED;
os_error_ = 0;
- CompleteRequests(AddressList());
+ CompleteRequests(AddressList(), base::TimeDelta());
}
- bool is_running() const {
+ bool is_dns_running() const {
+ return dns_task_.get() != NULL;
+ }
+
+ bool is_proc_running() const {
return proc_task_.get() != NULL;
}
+ bool is_running() const {
+ return is_dns_running() || is_proc_running();
+ }
+
// Called by HostResolverImpl when this job is evicted due to queue overflow.
void OnEvicted() {
// Must not be running.
DCHECK(!is_running());
- handle_ = PrioritizedDispatcher::Handle();
+ handle_.Reset();
net_log_.AddEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_JOB_EVICTED, NULL);
// This signals to CompleteRequests that this job never ran.
net_error_ = ERR_HOST_RESOLVER_QUEUE_TOO_LARGE;
os_error_ = 0;
- CompleteRequests(AddressList());
+ CompleteRequests(AddressList(), base::TimeDelta());
}
// PriorityDispatch::Job interface.
virtual void Start() OVERRIDE {
DCHECK(!is_running());
- handle_ = PrioritizedDispatcher::Handle();
+ handle_.Reset();
net_log_.AddEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_JOB_STARTED, NULL);
+ if (resolver_->dns_transaction_factory_.get()) {
+ StartDnsTask();
+ } else {
+ StartProcTask();
+ }
+ }
+
+ private:
+ // TODO(szym): Since DnsTransaction does not consume threads, we can increase
+ // the limits on |dispatcher_|. But in order to keep the number of WorkerPool
+ // threads low, we will need to use an "inner" PrioritizedDispatcher with
+ // tighter limits.
+ void StartProcTask() {
proc_task_ = new ProcTask(
key_,
resolver_->proc_params_,
@@ -1129,33 +1313,70 @@ class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
proc_task_->Start();
}
- private:
// Called by ProcTask when it completes.
void OnProcTaskComplete(int net_error, int os_error,
- const AddressList& addrlist) {
- DCHECK(is_running());
+ const AddressList& addr_list) {
+ DCHECK(is_proc_running());
proc_task_ = NULL;
net_error_ = net_error;
os_error_ = os_error;
- // We are the only consumer of |addrlist|, so we can safely change the port
- // without copy-on-write. This pays off, when job has only one request.
- AddressList list = addrlist;
- if (net_error == OK && !requests_.empty())
- MutableSetPort(requests_.front()->info().port(), &list);
- CompleteRequests(list);
+ base::TimeDelta ttl = base::TimeDelta::FromSeconds(
+ kNegativeCacheEntryTTLSeconds);
+ if (net_error == OK)
+ ttl = base::TimeDelta::FromSeconds(kCacheEntryTTLSeconds);
+ CompleteRequests(addr_list, ttl);
+ }
+
+ void StartDnsTask() {
+ dns_task_.reset(new DnsTask(
+ resolver_->dns_transaction_factory_.get(),
+ key_,
+ base::Bind(&Job::OnDnsTaskComplete, base::Unretained(this)),
+ net_log_));
+
+ int rv = dns_task_->Start();
+ if (rv != ERR_IO_PENDING) {
mmenke 2012/02/15 19:54:16 Maybe reset |dns_task| here, and maybe DCHECK it's
+ DCHECK_NE(OK, rv);
+ StartProcTask();
+ }
+ }
+
+ // Called by DnsTask when it completes.
+ void OnDnsTaskComplete(int net_error,
+ const AddressList& addr_list,
+ base::TimeDelta ttl) {
+ DCHECK(is_dns_running());
+ dns_task_.reset();
+
+ if (net_error != OK) {
+ // TODO(szym): Some net errors indicate lack of connectivity. Starting
+ // ProcTask in that case is a waste of time.
+ StartProcTask();
+ return;
+ }
+
+ net_error_ = OK;
cbentzel 2012/02/15 19:48:35 Are net_error_ and os_error_ needed as members, or
szym 2012/02/15 20:28:05 Agreed that the state indication is all over place
+ os_error_ = 0;
+ CompleteRequests(addr_list, ttl);
}
// Completes all Requests. Calls OnJobFinished and deletes self.
- void CompleteRequests(const AddressList& addrlist) {
+ void CompleteRequests(const AddressList& addr_list, base::TimeDelta ttl) {
CHECK(resolver_);
+ // We are the only consumer of |addr_list|, so we can safely change the port
+ // without copy-on-write. This pays off, when job has only one request.
+ AddressList list = addr_list;
+ if (net_error_ == OK && !requests_.empty())
+ MutableSetPort(requests_.front()->info().port(), &list);
+
// This job must be removed from resolver's |jobs_| now to make room for a
// new job with the same key in case one of the OnComplete callbacks decides
// to spawn one. Consequently, the job deletes itself when CompleteRequests
// is done.
scoped_ptr<Job> self_deleter(this);
- resolver_->OnJobFinished(this, addrlist);
+ resolver_->OnJobFinished(this, addr_list, ttl);
// Complete all of the requests that were attached to the job.
for (RequestsList::const_iterator it = requests_.begin();
@@ -1170,7 +1391,7 @@ class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
LogFinishRequest(req->source_net_log(), req->request_net_log(),
req->info(), net_error_, os_error_);
- req->OnComplete(net_error_, addrlist);
+ req->OnComplete(net_error_, addr_list);
// Check if the resolver was destroyed as a result of running the
// callback. If it was, we could continue, but we choose to bail.
@@ -1179,7 +1400,7 @@ class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
}
}
- // Used to call OnJobFinished and RemoveJob.
+ // Used to call OnJobFinished.
base::WeakPtr<HostResolverImpl> resolver_;
Key key_;
@@ -1195,14 +1416,17 @@ class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
int net_error_;
int os_error_;
- // A ProcTask created and started when this Job is dispatched..
+ // Resolves the host using a HostResolverProc.
scoped_refptr<ProcTask> proc_task_;
+ // Resolves the host using a DnsTransaction.
+ scoped_ptr<DnsTask> dns_task_;
+
// All Requests waiting for the result of this Job. Some can be canceled.
RequestsList requests_;
// A handle used by HostResolverImpl in |dispatcher_|.
- PrioritizedDispatcher::Handle handle_;
+ DispatcherHandle handle_;
};
//-----------------------------------------------------------------------------
@@ -1222,12 +1446,14 @@ HostResolverImpl::HostResolverImpl(
HostCache* cache,
const PrioritizedDispatcher::Limits& job_limits,
const ProcTaskParams& proc_params,
+ scoped_ptr<DnsConfigService> dns_config_service,
NetLog* net_log)
: cache_(cache),
dispatcher_(job_limits),
max_queued_jobs_(job_limits.total_jobs * 100u),
proc_params_(proc_params),
default_address_family_(ADDRESS_FAMILY_UNSPECIFIED),
+ dns_config_service_(dns_config_service.Pass()),
ipv6_probe_monitoring_(false),
additional_resolver_flags_(0),
net_log_(net_log) {
@@ -1254,6 +1480,9 @@ HostResolverImpl::HostResolverImpl(
#endif
NetworkChangeNotifier::AddDNSObserver(this);
#endif
+
+ if (dns_config_service_.get())
cbentzel 2012/02/15 19:48:35 Maybe want a note here that |this| is never remove
+ dns_config_service_->AddObserver(this);
}
HostResolverImpl::~HostResolverImpl() {
@@ -1307,8 +1536,8 @@ int HostResolverImpl::Resolve(const RequestInfo& info,
Job* job;
if (jobit == jobs_.end()) {
// Create new Job.
- job = new Job(this, key, request_net_log);
- job->set_handle(dispatcher_.Add(job, info.priority()));
+ job = new Job(this, key, request_net_log, info.priority());
+ job->AddToDispatcher(&dispatcher_);
// Check for queue overflow.
if (dispatcher_.num_queued_jobs() > max_queued_jobs_) {
@@ -1329,13 +1558,15 @@ int HostResolverImpl::Resolve(const RequestInfo& info,
}
// Can't complete synchronously. Create and attach request.
- Request* req = new Request(source_net_log, request_net_log, info, callback,
- addresses);
- job->AddRequest(req);
- if (!job->handle().is_null())
- job->set_handle(dispatcher_.ChangePriority(job->handle(), job->priority()));
+ scoped_ptr<Request> req(new Request(source_net_log,
+ request_net_log,
+ info,
+ callback,
+ addresses));
if (out_req)
- *out_req = reinterpret_cast<RequestHandle>(req);
+ *out_req = reinterpret_cast<RequestHandle>(req.get());
+
+ job->AddRequest(req.Pass());
DCHECK_EQ(ERR_IO_PENDING, job->net_error());
// Completion happens during Job::CompleteRequests().
@@ -1391,23 +1622,16 @@ void HostResolverImpl::CancelRequest(RequestHandle req_handle) {
job->CancelRequest(req);
- if (!job->handle().is_null()) {
- // Still in queue.
- if (job->num_active_requests()) {
- job->set_handle(dispatcher_.ChangePriority(job->handle(),
- job->priority()));
+ if (job->num_active_requests() == 0) {
+ if (job->is_running()) {
+ // Job is running (and could be in CompleteRequests right now).
+ // But to be in Request::OnComplete we would have to have a non-canceled
+ // request. So it is safe to Abort it if it has no more active requests.
mmenke 2012/02/15 19:54:16 I find this comment confusing. Maybe something li
+ job->Abort();
} else {
- dispatcher_.Cancel(job->handle());
RemoveJob(job);
delete job;
}
- } else {
- // Job is running (and could be in CompleteRequests right now).
- // But to be in Request::OnComplete we would have to have a non-canceled
- // request. So it is safe to Abort it if it has no more active requests.
- if (!job->num_active_requests()) {
- job->Abort();
- }
}
}
@@ -1482,9 +1706,11 @@ bool HostResolverImpl::ServeFromCache(const Key& key,
return true;
}
-void HostResolverImpl::OnJobFinished(Job* job, const AddressList& addrlist) {
+void HostResolverImpl::OnJobFinished(Job* job,
+ const AddressList& addrlist,
mmenke 2012/02/15 19:54:16 At some point (Not necessarily in this CL), you mi
+ base::TimeDelta ttl) {
DCHECK(job);
- DCHECK(job->handle().is_null());
+ DCHECK(!job->IsWaitingInDispatch());
RemoveJob(job);
if (job->net_error() == ERR_HOST_RESOLVER_QUEUE_TOO_LARGE)
return;
@@ -1495,9 +1721,6 @@ void HostResolverImpl::OnJobFinished(Job* job, const AddressList& addrlist) {
return;
// Write result to the cache.
if (cache_.get()) {
- base::TimeDelta ttl = base::TimeDelta::FromSeconds(0);
- if (job->net_error() == OK)
- ttl = base::TimeDelta::FromSeconds(kCacheEntryTTLSeconds);
cache_->Set(job->key(), job->net_error(), addrlist,
base::TimeTicks::Now(), ttl);
}
@@ -1557,7 +1780,7 @@ void HostResolverImpl::AbortAllInProgressJobs() {
return;
} else {
// Keep it in |dispatch_|.
- DCHECK(!job->handle().is_null());
+ DCHECK(job->IsWaitingInDispatch());
}
}
}
@@ -1594,4 +1817,22 @@ void HostResolverImpl::OnDNSChanged() {
// |this| may be deleted inside AbortAllInProgressJobs().
}
+void HostResolverImpl::OnConfigChanged(const DnsConfig& dns_config) {
+ // We want a new factory in place, before we Abort running Jobs, so that the
+ // newly started jobs use the new factory.
+ bool had_factory = (dns_transaction_factory_.get() != NULL);
+ if (dns_config.IsValid()) {
+ dns_transaction_factory_ = DnsTransactionFactory::CreateFactory(
+ new DnsSession(dns_config,
+ ClientSocketFactory::GetDefaultFactory(),
+ base::Bind(&base::RandInt),
+ net_log_));
+ } else {
+ dns_transaction_factory_.reset();
+ }
+ if (had_factory) {
cbentzel 2012/02/15 19:48:35 Nit: braces not needed. Also - would it be easier
szym 2012/02/15 20:28:05 OnDNSChanged aborts all in-flight Jobs. Calling it
+ OnDNSChanged();
+ }
+}
+
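
The comment in OnConfigChanged above explains the ordering: the new factory must be installed before OnDNSChanged() aborts and restarts in-flight Jobs, so the restarted Jobs use the new configuration. A small standalone model of that ordering follows; Config, Factory, Resolver, and RestartJobs are invented names, and the had_factory guard mirrors the patch as written.

#include <memory>

struct Config {
  bool valid;
};

struct Factory {
  explicit Factory(const Config& config) : config_(config) {}
  Config config_;
};

class Resolver {
 public:
  void OnConfigChanged(const Config& config) {
    const bool had_factory = (factory_ != nullptr);
    // Install the replacement first: any job restarted below sees whatever
    // factory is in place at the moment it starts.
    if (config.valid)
      factory_.reset(new Factory(config));
    else
      factory_.reset();
    // As in the patch, jobs are only aborted/restarted when async DNS was
    // already in use before this change.
    if (had_factory)
      RestartJobs();
  }

 private:
  void RestartJobs() { /* abort in-flight jobs; they re-enter the queue */ }

  std::unique_ptr<Factory> factory_;
};
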
} // namespace net
