Index: net/base/host_resolver_impl.cc
diff --git a/net/base/host_resolver_impl.cc b/net/base/host_resolver_impl.cc
index 1e4d757af05ed2d6934e28ae95bafdffc1b125bb..1982b440afa65c814c868280348bf6d4ee2a14fa 100644
--- a/net/base/host_resolver_impl.cc
+++ b/net/base/host_resolver_impl.cc
@@ -1572,8 +1572,10 @@ void HostResolverImpl::CancelRequest(RequestHandle req_handle) { |
// that Request could not have been cancelled, so job->num_active_requests() |
// could not be 0. Therefore, we are not in Job::CompleteRequests(). |
RemoveJob(job); |
- if (job->is_running()) |
+  if (job->is_running()) {
    [review comment — cbentzel, 2012/03/02 19:46:49: "I asked over email - but is there a chance that a" — comment truncated in extraction]
+    dispatcher_.OnJobFinished();
     job->Abort();
    [review comment — cbentzel, 2012/03/02 20:02:40: "FYI: I don't think the Abort is needed here. ~Job" — comment truncated in extraction]
    [review comment — szym, 2012/03/02 20:05:43: "The cleaning up is now in a few places: Job::~Job," — comment truncated in extraction]
+ } |
delete job; |
} |
} |
@@ -1709,7 +1711,6 @@ HostResolverImpl::Key HostResolverImpl::GetEffectiveKeyForRequest( |
} |
void HostResolverImpl::AbortAllInProgressJobs() { |
- base::WeakPtr<HostResolverImpl> self = AsWeakPtr(); |
// In Abort, a Request callback could spawn new Jobs with matching keys, so |
// first collect and remove all running jobs from |jobs_|. |
std::vector<Job*> jobs_to_abort; |
@@ -1724,10 +1725,16 @@ void HostResolverImpl::AbortAllInProgressJobs() { |
} |
} |
+ // Check if no dispatcher slots leaked out. |
+ DCHECK_EQ(dispatcher_.num_running_jobs(), jobs_to_abort.size()); |
+ |
+ // Life check to bail once |this| is deleted. |
+ base::WeakPtr<HostResolverImpl> self = AsWeakPtr(); |
+ |
// Then Abort them and dispatch new Jobs. |
- for (size_t i = 0; i < jobs_to_abort.size(); ++i) { |
- jobs_to_abort[i]->Abort(); |
+ for (size_t i = 0; self && i < jobs_to_abort.size(); ++i) { |
dispatcher_.OnJobFinished(); |
+ jobs_to_abort[i]->Abort(); |
} |
STLDeleteElements(&jobs_to_abort); |
} |