| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/base/host_resolver_impl.h" | 5 #include "net/base/host_resolver_impl.h" |
| 6 | 6 |
| 7 #if defined(OS_WIN) | 7 #if defined(OS_WIN) |
| 8 #include <Winsock2.h> | 8 #include <Winsock2.h> |
| 9 #elif defined(OS_POSIX) | 9 #elif defined(OS_POSIX) |
| 10 #include <netdb.h> | 10 #include <netdb.h> |
| (...skipping 1058 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1069 priority_tracker_(priority), | 1069 priority_tracker_(priority), |
| 1070 had_non_speculative_request_(false), | 1070 had_non_speculative_request_(false), |
| 1071 net_log_(BoundNetLog::Make(request_net_log.net_log(), | 1071 net_log_(BoundNetLog::Make(request_net_log.net_log(), |
| 1072 NetLog::SOURCE_HOST_RESOLVER_IMPL_JOB)) { | 1072 NetLog::SOURCE_HOST_RESOLVER_IMPL_JOB)) { |
| 1073 request_net_log.AddEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_CREATE_JOB, NULL); | 1073 request_net_log.AddEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_CREATE_JOB, NULL); |
| 1074 | 1074 |
| 1075 net_log_.BeginEvent( | 1075 net_log_.BeginEvent( |
| 1076 NetLog::TYPE_HOST_RESOLVER_IMPL_JOB, | 1076 NetLog::TYPE_HOST_RESOLVER_IMPL_JOB, |
| 1077 make_scoped_refptr(new JobCreationParameters( | 1077 make_scoped_refptr(new JobCreationParameters( |
| 1078 key_.hostname, request_net_log.source()))); | 1078 key_.hostname, request_net_log.source()))); |
| 1079 | |
| 1080 handle_ = resolver_->dispatcher_.Add(this, priority); | |
| 1081 } | 1079 } |
| 1082 | 1080 |
| 1083 virtual ~Job() { | 1081 virtual ~Job() { |
| 1084 if (is_running()) { | 1082 if (is_running()) { |
| 1085 // |resolver_| was destroyed with this Job still in flight. | 1083 // |resolver_| was destroyed with this Job still in flight. |
| 1086 // Clean-up, record in the log, but don't run any callbacks. | 1084 // Clean-up, record in the log, but don't run any callbacks. |
| 1087 if (is_proc_running()) { | 1085 if (is_proc_running()) { |
| 1088 proc_task_->Cancel(); | 1086 proc_task_->Cancel(); |
| 1089 proc_task_ = NULL; | 1087 proc_task_ = NULL; |
| 1090 } | 1088 } |
| 1091 net_log_.EndEventWithNetErrorCode(NetLog::TYPE_HOST_RESOLVER_IMPL_JOB, | 1089 net_log_.EndEventWithNetErrorCode(NetLog::TYPE_HOST_RESOLVER_IMPL_JOB, |
| 1092 ERR_ABORTED); | 1090 ERR_ABORTED); |
| 1093 } else if (is_queued()) { | 1091 } else if (is_queued()) { |
| 1094 // This Job was cancelled without running. | 1092 // |resolver_| was destroyed without running this Job. |
| 1095 // TODO(szym): is there any benefit in having this distinction? | 1093 // TODO(szym): is there any benefit in having this distinction? |
| 1096 net_log_.AddEvent(NetLog::TYPE_CANCELLED, NULL); | 1094 net_log_.AddEvent(NetLog::TYPE_CANCELLED, NULL); |
| 1097 net_log_.EndEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_JOB, NULL); | 1095 net_log_.EndEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_JOB, NULL); |
| 1098 } | 1096 } |
| 1099 // else CompleteRequests logged EndEvent. | 1097 // else CompleteRequests logged EndEvent. |
| 1100 | 1098 |
| 1101 // Log any remaining Requests as cancelled. | 1099 // Log any remaining Requests as cancelled. |
| 1102 for (RequestsList::const_iterator it = requests_.begin(); | 1100 for (RequestsList::const_iterator it = requests_.begin(); |
| 1103 it != requests_.end(); ++it) { | 1101 it != requests_.end(); ++it) { |
| 1104 Request* req = *it; | 1102 Request* req = *it; |
| 1105 if (req->was_canceled()) | 1103 if (req->was_canceled()) |
| 1106 continue; | 1104 continue; |
| 1107 DCHECK_EQ(this, req->job()); | 1105 DCHECK_EQ(this, req->job()); |
| 1108 LogCancelRequest(req->source_net_log(), req->request_net_log(), | 1106 LogCancelRequest(req->source_net_log(), req->request_net_log(), |
| 1109 req->info()); | 1107 req->info()); |
| 1110 } | 1108 } |
| 1111 STLDeleteElements(&requests_); | 1109 STLDeleteElements(&requests_); |
| 1112 } | 1110 } |
| 1113 | 1111 |
| 1112 void Schedule() { |
| 1113 handle_ = resolver_->dispatcher_.Add(this, priority()); |
| 1114 } |
| 1115 |
| 1114 RequestPriority priority() const { | 1116 RequestPriority priority() const { |
| 1115 return priority_tracker_.highest_priority(); | 1117 return priority_tracker_.highest_priority(); |
| 1116 } | 1118 } |
| 1117 | 1119 |
| 1118 // Number of non-canceled requests in |requests_|. | 1120 // Number of non-canceled requests in |requests_|. |
| 1119 size_t num_active_requests() const { | 1121 size_t num_active_requests() const { |
| 1120 return priority_tracker_.total_count(); | 1122 return priority_tracker_.total_count(); |
| 1121 } | 1123 } |
| 1122 | 1124 |
| 1123 const Key& key() const { | 1125 const Key& key() const { |
| (...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1169 NetLog::TYPE_HOST_RESOLVER_IMPL_JOB_REQUEST_DETACH, | 1171 NetLog::TYPE_HOST_RESOLVER_IMPL_JOB_REQUEST_DETACH, |
| 1170 make_scoped_refptr(new JobAttachParameters( | 1172 make_scoped_refptr(new JobAttachParameters( |
| 1171 req->request_net_log().source(), priority()))); | 1173 req->request_net_log().source(), priority()))); |
| 1172 | 1174 |
| 1173 if (!handle_.is_null()) { | 1175 if (!handle_.is_null()) { |
| 1174 if (num_active_requests() > 0) { | 1176 if (num_active_requests() > 0) { |
| 1175 handle_ = resolver_->dispatcher_.ChangePriority(handle_, priority()); | 1177 handle_ = resolver_->dispatcher_.ChangePriority(handle_, priority()); |
| 1176 } else { | 1178 } else { |
| 1177 resolver_->dispatcher_.Cancel(handle_); | 1179 resolver_->dispatcher_.Cancel(handle_); |
| 1178 handle_.Reset(); | 1180 handle_.Reset(); |
| 1181 net_log_.AddEvent(NetLog::TYPE_CANCELLED, NULL); |
| 1182 net_log_.EndEvent(NetLog::TYPE_HOST_RESOLVER_IMPL_JOB, NULL); |
| 1179 } | 1183 } |
| 1180 } | 1184 } |
| 1181 } | 1185 } |
| 1182 | 1186 |
| 1183 // Aborts and destroys the job, completes all requests as aborted. | 1187 // Aborts and destroys the job, completes all requests as aborted. |
| 1184 // The caller should clean up. | 1188 // The caller should clean up. |
| 1185 void Abort() { | 1189 void Abort() { |
| 1186 // Job should only be aborted if it's running. | 1190 // Job should only be aborted if it's running. |
| 1187 DCHECK(is_running()); | 1191 DCHECK(is_running()); |
| 1188 if (is_proc_running()) { | 1192 if (is_proc_running()) { |
| (...skipping 291 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1480 } | 1484 } |
| 1481 | 1485 |
| 1482 // Next we need to attach our request to a "job". This job is responsible for | 1486 // Next we need to attach our request to a "job". This job is responsible for |
| 1483 // calling "getaddrinfo(hostname)" on a worker thread. | 1487 // calling "getaddrinfo(hostname)" on a worker thread. |
| 1484 | 1488 |
| 1485 JobMap::iterator jobit = jobs_.find(key); | 1489 JobMap::iterator jobit = jobs_.find(key); |
| 1486 Job* job; | 1490 Job* job; |
| 1487 if (jobit == jobs_.end()) { | 1491 if (jobit == jobs_.end()) { |
| 1488 // Create new Job. | 1492 // Create new Job. |
| 1489 job = new Job(this, key, request_net_log, info.priority()); | 1493 job = new Job(this, key, request_net_log, info.priority()); |
| 1494 job->Schedule(); |
| 1490 | 1495 |
| 1491 // Check for queue overflow. | 1496 // Check for queue overflow. |
| 1492 if (dispatcher_.num_queued_jobs() > max_queued_jobs_) { | 1497 if (dispatcher_.num_queued_jobs() > max_queued_jobs_) { |
| 1493 Job* evicted = static_cast<Job*>(dispatcher_.EvictOldestLowest()); | 1498 Job* evicted = static_cast<Job*>(dispatcher_.EvictOldestLowest()); |
| 1494 DCHECK(evicted); | 1499 DCHECK(evicted); |
| 1495 if (evicted == job) { | 1500 if (evicted == job) { |
| 1496 delete job; | 1501 delete job; |
| 1497 rv = ERR_HOST_RESOLVER_QUEUE_TOO_LARGE; | 1502 rv = ERR_HOST_RESOLVER_QUEUE_TOO_LARGE; |
| 1498 LogFinishRequest(source_net_log, request_net_log, info, rv); | 1503 LogFinishRequest(source_net_log, request_net_log, info, rv); |
| 1499 return rv; | 1504 return rv; |
| (...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1565 Job* job = req->job(); | 1570 Job* job = req->job(); |
| 1566 DCHECK(job); | 1571 DCHECK(job); |
| 1567 | 1572 |
| 1568 job->CancelRequest(req); | 1573 job->CancelRequest(req); |
| 1569 | 1574 |
| 1570 if (job->num_active_requests() == 0) { | 1575 if (job->num_active_requests() == 0) { |
| 1571 // If we were called from a Request's callback within Job::CompleteRequests, | 1576 // If we were called from a Request's callback within Job::CompleteRequests, |
| 1572 // that Request could not have been cancelled, so job->num_active_requests() | 1577 // that Request could not have been cancelled, so job->num_active_requests() |
| 1573 // could not be 0. Therefore, we are not in Job::CompleteRequests(). | 1578 // could not be 0. Therefore, we are not in Job::CompleteRequests(). |
| 1574 RemoveJob(job); | 1579 RemoveJob(job); |
| 1575 if (job->is_running()) | 1580 if (job->is_running()) { |
| 1581 dispatcher_.OnJobFinished(); |
| 1576 job->Abort(); | 1582 job->Abort(); |
| 1583 } |
| 1577 delete job; | 1584 delete job; |
| 1578 } | 1585 } |
| 1579 } | 1586 } |
| 1580 | 1587 |
| 1581 void HostResolverImpl::SetDefaultAddressFamily(AddressFamily address_family) { | 1588 void HostResolverImpl::SetDefaultAddressFamily(AddressFamily address_family) { |
| 1582 DCHECK(CalledOnValidThread()); | 1589 DCHECK(CalledOnValidThread()); |
| 1583 ipv6_probe_monitoring_ = false; | 1590 ipv6_probe_monitoring_ = false; |
| 1584 DiscardIPv6ProbeJob(); | 1591 DiscardIPv6ProbeJob(); |
| 1585 default_address_family_ = address_family; | 1592 default_address_family_ = address_family; |
| 1586 } | 1593 } |
| (...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1702 if (effective_address_family == ADDRESS_FAMILY_UNSPECIFIED && | 1709 if (effective_address_family == ADDRESS_FAMILY_UNSPECIFIED && |
| 1703 default_address_family_ != ADDRESS_FAMILY_UNSPECIFIED) { | 1710 default_address_family_ != ADDRESS_FAMILY_UNSPECIFIED) { |
| 1704 effective_address_family = default_address_family_; | 1711 effective_address_family = default_address_family_; |
| 1705 if (ipv6_probe_monitoring_) | 1712 if (ipv6_probe_monitoring_) |
| 1706 effective_flags |= HOST_RESOLVER_DEFAULT_FAMILY_SET_DUE_TO_NO_IPV6; | 1713 effective_flags |= HOST_RESOLVER_DEFAULT_FAMILY_SET_DUE_TO_NO_IPV6; |
| 1707 } | 1714 } |
| 1708 return Key(info.hostname(), effective_address_family, effective_flags); | 1715 return Key(info.hostname(), effective_address_family, effective_flags); |
| 1709 } | 1716 } |
| 1710 | 1717 |
| 1711 void HostResolverImpl::AbortAllInProgressJobs() { | 1718 void HostResolverImpl::AbortAllInProgressJobs() { |
| 1712 base::WeakPtr<HostResolverImpl> self = AsWeakPtr(); | |
| 1713 // In Abort, a Request callback could spawn new Jobs with matching keys, so | 1719 // In Abort, a Request callback could spawn new Jobs with matching keys, so |
| 1714 // first collect and remove all running jobs from |jobs_|. | 1720 // first collect and remove all running jobs from |jobs_|. |
| 1715 std::vector<Job*> jobs_to_abort; | 1721 std::vector<Job*> jobs_to_abort; |
| 1716 for (JobMap::iterator it = jobs_.begin(); it != jobs_.end(); ) { | 1722 for (JobMap::iterator it = jobs_.begin(); it != jobs_.end(); ) { |
| 1717 Job* job = it->second; | 1723 Job* job = it->second; |
| 1718 if (job->is_running()) { | 1724 if (job->is_running()) { |
| 1719 jobs_to_abort.push_back(job); | 1725 jobs_to_abort.push_back(job); |
| 1720 jobs_.erase(it++); | 1726 jobs_.erase(it++); |
| 1721 } else { | 1727 } else { |
| 1722 DCHECK(job->is_queued()); | 1728 DCHECK(job->is_queued()); |
| 1723 ++it; | 1729 ++it; |
| 1724 } | 1730 } |
| 1725 } | 1731 } |
| 1726 | 1732 |
| 1733 // Check that no dispatcher slots have leaked. |
| 1734 DCHECK_EQ(dispatcher_.num_running_jobs(), jobs_to_abort.size()); |
| 1735 |
| 1736 // Liveness check to bail out once |this| is deleted. |
| 1737 base::WeakPtr<HostResolverImpl> self = AsWeakPtr(); |
| 1738 |
| 1727 // Then Abort them and dispatch new Jobs. | 1739 // Then Abort them and dispatch new Jobs. |
| 1728 for (size_t i = 0; i < jobs_to_abort.size(); ++i) { | 1740 for (size_t i = 0; self && i < jobs_to_abort.size(); ++i) { |
| 1741 dispatcher_.OnJobFinished(); |
| 1729 jobs_to_abort[i]->Abort(); | 1742 jobs_to_abort[i]->Abort(); |
| 1730 dispatcher_.OnJobFinished(); | |
| 1731 } | 1743 } |
| 1732 STLDeleteElements(&jobs_to_abort); | 1744 STLDeleteElements(&jobs_to_abort); |
| 1733 } | 1745 } |
| 1734 | 1746 |
| 1735 void HostResolverImpl::OnIPAddressChanged() { | 1747 void HostResolverImpl::OnIPAddressChanged() { |
| 1736 if (cache_.get()) | 1748 if (cache_.get()) |
| 1737 cache_->clear(); | 1749 cache_->clear(); |
| 1738 if (ipv6_probe_monitoring_) { | 1750 if (ipv6_probe_monitoring_) { |
| 1739 DiscardIPv6ProbeJob(); | 1751 DiscardIPv6ProbeJob(); |
| 1740 ipv6_probe_job_ = new IPv6ProbeJob(this); | 1752 ipv6_probe_job_ = new IPv6ProbeJob(this); |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1777 } else { | 1789 } else { |
| 1778 dns_transaction_factory_.reset(); | 1790 dns_transaction_factory_.reset(); |
| 1779 } | 1791 } |
| 1780 // Don't Abort running Jobs unless they were running on DnsTransaction. | 1792 // Don't Abort running Jobs unless they were running on DnsTransaction. |
| 1781 // TODO(szym): This will change once http://crbug.com/114827 is fixed. | 1793 // TODO(szym): This will change once http://crbug.com/114827 is fixed. |
| 1782 if (had_factory) | 1794 if (had_factory) |
| 1783 OnDNSChanged(NetworkChangeNotifier::CHANGE_DNS_SETTINGS); | 1795 OnDNSChanged(NetworkChangeNotifier::CHANGE_DNS_SETTINGS); |
| 1784 } | 1796 } |
| 1785 | 1797 |
| 1786 } // namespace net | 1798 } // namespace net |
| OLD | NEW |