| Index: net/socket/client_socket_pool_base.cc
|
| diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
|
| index d230ad4f38461533502cc1d46ec0e458c9c11f19..22fd29d7e22c7c52d4b3b4ebe9ff6162a9920b3a 100644
|
| --- a/net/socket/client_socket_pool_base.cc
|
| +++ b/net/socket/client_socket_pool_base.cc
|
| @@ -207,6 +207,7 @@ ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() {
|
| DCHECK(group_map_.empty());
|
| DCHECK(pending_callback_map_.empty());
|
| DCHECK_EQ(0, connecting_socket_count_);
|
| + CHECK(higher_layer_pools_.empty());
|
|
|
| NetworkChangeNotifier::RemoveIPAddressObserver(this);
|
| }
|
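|
| The new CHECK enforces a teardown-order invariant: every higher-layer pool
| must unregister itself before this pool is destroyed, or it would be left
| holding a dangling back-pointer via |higher_layer_pools_|. A minimal
| standalone sketch of that contract (LowerPool/HigherPool are illustrative
| names, not the real net/ classes):
|
|   #include <cassert>
|   #include <set>
|
|   class HigherPool;
|
|   class LowerPool {
|    public:
|     ~LowerPool() {
|       // Mirrors the new CHECK: dying while a higher layer is still
|       // registered would leave it holding a stale pointer.
|       assert(higher_layer_pools_.empty());
|     }
|     void AddLayeredPool(HigherPool* pool) { higher_layer_pools_.insert(pool); }
|     void RemoveLayeredPool(HigherPool* pool) { higher_layer_pools_.erase(pool); }
|
|    private:
|     std::set<HigherPool*> higher_layer_pools_;
|   };
|
|   class HigherPool {
|    public:
|     explicit HigherPool(LowerPool* lower) : lower_(lower) {
|       lower_->AddLayeredPool(this);  // register on construction
|     }
|     ~HigherPool() {
|       lower_->RemoveLayeredPool(this);  // must run before |lower_| dies
|     }
|
|    private:
|     LowerPool* lower_;
|   };
|
|   int main() {
|     LowerPool lower;
|     {
|       HigherPool higher(&lower);
|     }  // |higher| unregisters here, so |lower|'s destructor check passes
|     return 0;
|   }
|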
| @@ -238,6 +239,18 @@ ClientSocketPoolBaseHelper::RemoveRequestFromQueue(
|
| return req;
|
| }
|
|
|
| +void ClientSocketPoolBaseHelper::AddLayeredPool(LayeredPool* pool) {
|
| + CHECK(pool);
|
| + CHECK(!ContainsKey(higher_layer_pools_, pool));
|
| + higher_layer_pools_.insert(pool);
|
| +}
|
| +
|
| +void ClientSocketPoolBaseHelper::RemoveLayeredPool(LayeredPool* pool) {
|
| + CHECK(pool);
|
| + CHECK(ContainsKey(higher_layer_pools_, pool));
|
| + higher_layer_pools_.erase(pool);
|
| +}
|
| +
|
| int ClientSocketPoolBaseHelper::RequestSocket(
|
| const std::string& group_name,
|
| const Request* request) {
|
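|
| For reference, the LayeredPool interface these hooks depend on is not shown
| in this diff; reconstructed from the calls made further down, it presumably
| looks roughly like this (the exact declaration is an assumption):
|
|   // Inferred from (*it)->CloseOneIdleConnection() and (*it)->CrashIfFreed()
|   // in CloseOneIdleConnectionInLayeredPool() below.
|   class LayeredPool {
|    public:
|     virtual ~LayeredPool() {}
|
|     // Close one idle connection held by this higher-layer pool, if any.
|     // Returns true if a connection was closed.
|     virtual bool CloseOneIdleConnection() = 0;
|
|     // Debugging aid: crash immediately if this object has already been
|     // freed, to surface use-after-free bugs at the call site.
|     virtual void CrashIfFreed() = 0;
|   };
|
| ContainsKey() is the existing base/stl_util helper (find() != end()); the
| paired CHECKs make double registration, and unregistering a pool that was
| never added, crash immediately instead of silently corrupting the set.
|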
| @@ -336,26 +349,46 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
|
| // Can we make another active socket now?
|
| if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
|
| !request->ignore_limits()) {
|
| + // TODO(willchan): Consider whether or not we need to close a socket in a
|
| + // higher layered group. I don't think this makes sense, since if we needed
|
| + // a socket we would just reuse one from the higher layer and never make it
|
| + // down to this layer.
|
| request->net_log().AddEvent(
|
| NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL);
|
| return ERR_IO_PENDING;
|
| }
|
|
|
| if (ReachedMaxSocketsLimit() && !request->ignore_limits()) {
|
| + // NOTE(mmenke): I wonder if we really need different code for each case
|
| + // here; the only reason for them now seems to be preconnects.
|
| if (idle_socket_count() > 0) {
|
| + // There's an idle socket in this pool. Either it's still in this group
|
| + // (we got here because preconnecting bypasses idle sockets), or it's
|
| + // idle in another group.
|
| bool closed = CloseOneIdleSocketExceptInGroup(group);
|
| if (preconnecting && !closed)
|
| return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
|
| } else {
|
| - // We could check if we really have a stalled group here, but it requires
|
| - // a scan of all groups, so just flip a flag here, and do the check later.
|
| - request->net_log().AddEvent(
|
| - NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
|
| - return ERR_IO_PENDING;
|
| + do {
|
| + if (!CloseOneIdleConnectionInLayeredPool()) {
|
| + // We could check if we really have a stalled group here, but it
|
| + // requires a scan of all groups, so just flip a flag here, and do
|
| + // the check later.
|
| + request->net_log().AddEvent(
|
| + NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
|
| + return ERR_IO_PENDING;
|
| + }
|
| + } while (ReachedMaxSocketsLimit());
|
| +
|
| + // It is possible that CloseOneIdleConnectionInLayeredPool() has deleted
|
| + // our Group (see http://crbug.com/109876), so look it up again
|
| + // to be safe.
|
| + group = GetOrCreateGroup(group_name);
|
| }
|
| }
|
|
|
| - // We couldn't find a socket to reuse, so allocate and connect a new one.
|
| + // We couldn't find a socket to reuse, and there's space to allocate one,
|
| + // so allocate and connect a new one.
|
| scoped_ptr<ConnectJob> connect_job(
|
| connect_job_factory_->NewConnectJob(group_name, *request, this));
|
|
|
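|
| The new do/while is the heart of the change: when the pool is at the global
| socket limit and has no idle sockets of its own, it repeatedly asks the
| higher layers to give up an idle connection until it drops back under the
| limit, and only reports a stall if no layer can help. A self-contained
| simulation of just that loop (the counters stand in for real pool state):
|
|   #include <iostream>
|
|   // Illustrative stand-ins for the pool state the loop consults.
|   static int g_open_sockets = 6;       // handed out + connecting
|   static const int g_max_sockets = 4;  // the pool-wide limit
|   static int g_higher_layer_idle = 3;  // idle connections up the stack
|
|   static bool ReachedMaxSocketsLimit() {
|     return g_open_sockets >= g_max_sockets;
|   }
|
|   static bool CloseOneIdleConnectionInLayeredPool() {
|     if (g_higher_layer_idle == 0)
|       return false;         // no higher layer could free anything
|     --g_higher_layer_idle;  // a higher layer closes one idle connection...
|     --g_open_sockets;       // ...which releases a socket in this pool
|     return true;
|   }
|
|   int main() {
|     // Mirrors the loop added to RequestSocketInternal().
|     do {
|       if (!CloseOneIdleConnectionInLayeredPool()) {
|         std::cout << "stalled on max sockets (ERR_IO_PENDING)\n";
|         return 0;
|       }
|     } while (ReachedMaxSocketsLimit());
|     std::cout << "slot freed; safe to create a new ConnectJob\n";
|     return 0;
|   }
|
| Note the Group re-lookup that follows the loop in the real code: closing a
| connection in a higher layer can release the last socket in this group and
| delete it (http://crbug.com/109876), so the cached pointer can go stale.
|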
| @@ -617,7 +650,8 @@ DictionaryValue* ClientSocketPoolBaseHelper::GetInfoAsValue(
|
| group_dict->Set("connect_jobs", connect_jobs_list);
|
|
|
| group_dict->SetBoolean("is_stalled",
|
| - group->IsStalled(max_sockets_per_group_));
|
| + group->IsStalledOnPoolMaxSockets(
|
| + max_sockets_per_group_));
|
| group_dict->SetBoolean("has_backup_job", group->HasBackupJob());
|
|
|
| all_groups_dict->SetWithoutPathExpansion(it->first, group_dict);
|
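|
| The rename makes it explicit which kind of stall the predicate reports:
| blocked by the pool-wide |max_sockets_|, not by the per-group cap. Judging
| from the comment in IsStalled() below, the group-level test is presumably
| equivalent to (accessor names are assumptions):
|
|   // Inferred semantics of Group::IsStalledOnPoolMaxSockets().
|   bool IsStalledOnPoolMaxSockets(int pending_request_count,
|                                  int job_count,
|                                  int max_sockets_per_group) {
|     // More queued requests than in-flight ConnectJobs, yet still room
|     // under the per-group cap: only the pool-wide limit is in the way.
|     return pending_request_count > job_count &&
|            job_count < max_sockets_per_group;
|   }
|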
| @@ -792,18 +826,22 @@ void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() {
|
| // are not at the |max_sockets_per_group_| limit. Note: for requests with
|
| // the same priority, the winner is based on group hash ordering (and not
|
| // insertion order).
|
| -bool ClientSocketPoolBaseHelper::FindTopStalledGroup(Group** group,
|
| - std::string* group_name) {
|
| +bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
|
| + Group** group,
|
| + std::string* group_name) const {
|
| + CHECK((group && group_name) || (!group && !group_name));
|
| Group* top_group = NULL;
|
| const std::string* top_group_name = NULL;
|
| bool has_stalled_group = false;
|
| - for (GroupMap::iterator i = group_map_.begin();
|
| + for (GroupMap::const_iterator i = group_map_.begin();
|
| i != group_map_.end(); ++i) {
|
| Group* curr_group = i->second;
|
| const RequestQueue& queue = curr_group->pending_requests();
|
| if (queue.empty())
|
| continue;
|
| - if (curr_group->IsStalled(max_sockets_per_group_)) {
|
| + if (curr_group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) {
|
| + if (!group)
|
| + return true;
|
| has_stalled_group = true;
|
| bool has_higher_priority = !top_group ||
|
| curr_group->TopPendingPriority() < top_group->TopPendingPriority();
|
| @@ -815,8 +853,11 @@ bool ClientSocketPoolBaseHelper::FindTopStalledGroup(Group** group,
|
| }
|
|
|
| if (top_group) {
|
| + CHECK(group);
|
| *group = top_group;
|
| *group_name = *top_group_name;
|
| + } else {
|
| + CHECK(!has_stalled_group);
|
| }
|
| return has_stalled_group;
|
| }
|
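|
| FindTopStalledGroup() now doubles as a cheap existence test: passing NULL
| for both out-parameters short-circuits at the first stalled group instead
| of scanning for the highest-priority one. A self-contained model of the
| convention (Group and the map are simplified stand-ins):
|
|   #include <cassert>
|   #include <cstddef>
|   #include <map>
|   #include <string>
|
|   struct Group {
|     int top_pending_priority;  // lower value == higher priority
|     bool stalled;
|   };
|   typedef std::map<std::string, Group*> GroupMap;
|
|   bool FindTopStalledGroup(const GroupMap& groups,
|                            Group** group,
|                            std::string* group_name) {
|     // Either both out-params or neither, as the new CHECK demands.
|     assert((group && group_name) || (!group && !group_name));
|     Group* top_group = NULL;
|     const std::string* top_group_name = NULL;
|     bool has_stalled_group = false;
|     for (GroupMap::const_iterator i = groups.begin();
|          i != groups.end(); ++i) {
|       if (!i->second->stalled)
|         continue;
|       if (!group)
|         return true;  // existence test: stop at the first stalled group
|       has_stalled_group = true;
|       if (!top_group || i->second->top_pending_priority <
|                             top_group->top_pending_priority) {
|         top_group = i->second;
|         top_group_name = &i->first;
|       }
|     }
|     if (top_group) {
|       *group = top_group;
|       *group_name = *top_group_name;
|     }
|     return has_stalled_group;
|   }
|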
| @@ -889,6 +930,25 @@ void ClientSocketPoolBaseHelper::Flush() {
|
| AbortAllRequests();
|
| }
|
|
|
| +bool ClientSocketPoolBaseHelper::IsStalled() const {
|
| + // If we are not using |max_sockets_|, then clearly we are not stalled.
|
| + if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
|
| + return false;
|
| + // So in order to be stalled we need to be using |max_sockets_| AND
|
| + // we need to have a request that is actually stalled on the global
|
| + // socket limit. To find such a request, we look for a group that
|
| + // has more requests than jobs AND where the number of jobs is less
|
| + // than |max_sockets_per_group_|. (If the number of jobs is equal to
|
| + // |max_sockets_per_group_|, then the request is stalled on the group,
|
| + // which does not count.)
|
| + for (GroupMap::const_iterator it = group_map_.begin();
|
| + it != group_map_.end(); ++it) {
|
| + if (it->second->IsStalledOnPoolMaxSockets(max_sockets_per_group_))
|
| + return true;
|
| + }
|
| + return false;
|
| +}
|
| +
|
| void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job,
|
| Group* group) {
|
| CHECK_GT(connecting_socket_count_, 0);
|
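|
| Concretely, IsStalled() is a two-part test. A worked example (256 and 6
| match the usual pool-wide and per-group HTTP limits, but treat the numbers
| as illustrative):
|
|   #include <iostream>
|
|   int main() {
|     const int max_sockets = 256;
|     const int max_sockets_per_group = 6;
|
|     // Part 1: the pool-wide limit must actually be exhausted.
|     int handed_out_socket_count = 250;
|     int connecting_socket_count = 6;
|     bool at_pool_limit =
|         handed_out_socket_count + connecting_socket_count >= max_sockets;
|
|     // Part 2: some group must have more pending requests than ConnectJobs
|     // *and* room left under the per-group cap -- i.e. it is blocked only
|     // by the pool-wide limit, not by its own cap.
|     int pending_requests = 3;
|     int jobs = 1;
|     bool group_stalled_on_pool =
|         pending_requests > jobs && jobs < max_sockets_per_group;
|
|     std::cout << std::boolalpha
|               << (at_pool_limit && group_stalled_on_pool) << "\n";  // true
|     return 0;
|   }
|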
| @@ -1025,8 +1085,10 @@ bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
|
| return true;
|
| }
|
|
|
| -void ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
|
| - CloseOneIdleSocketExceptInGroup(NULL);
|
| +bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
|
| + if (idle_socket_count() == 0)
|
| + return false;
|
| + return CloseOneIdleSocketExceptInGroup(NULL);
|
| }
|
|
|
| bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
|
| @@ -1050,9 +1112,19 @@ bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
|
| }
|
| }
|
|
|
| - if (!exception_group)
|
| - LOG(DFATAL) << "No idle socket found to close!.";
|
| + return false;
|
| +}
|
|
|
| +bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInLayeredPool() {
|
| + // This pool doesn't have any idle sockets. It's possible that a pool at a
|
| + // higher layer is holding one of this pool's sockets active even though
|
| + // it's actually idle. Query the higher layers.
|
| + for (std::set<LayeredPool*>::const_iterator it = higher_layer_pools_.begin();
|
| + it != higher_layer_pools_.end(); ++it) {
|
| + (*it)->CrashIfFreed();
|
| + if ((*it)->CloseOneIdleConnection())
|
| + return true;
|
| + }
|
| return false;
|
| }
|
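|
| Putting the pieces together: a higher-layer pool's CloseOneIdleConnection()
| would presumably first surrender one of its own idle sockets (note that
| CloseOneIdleSocket() now returns bool precisely so it can be chained) and
| then recurse further up, letting a whole stack of layered pools be drained
| from the bottom. A self-contained sketch under that assumption; all names
| are illustrative stand-ins, not the real net/ classes:
|
|   #include <set>
|
|   class LayeredPool {
|    public:
|     virtual ~LayeredPool() {}
|     virtual bool CloseOneIdleConnection() = 0;
|   };
|
|   class PoolHelper {
|    public:
|     PoolHelper() : idle_sockets_(0) {}
|
|     void AddLayeredPool(LayeredPool* p) { higher_layer_pools_.insert(p); }
|     void RemoveLayeredPool(LayeredPool* p) { higher_layer_pools_.erase(p); }
|     void set_idle_sockets(int n) { idle_sockets_ = n; }
|
|     // Like the patched CloseOneIdleSocket(): report whether anything
|     // was actually closed.
|     bool CloseOneIdleSocket() {
|       if (idle_sockets_ == 0)
|         return false;
|       --idle_sockets_;  // stands in for closing a real idle socket
|       return true;
|     }
|
|     // Like CloseOneIdleConnectionInLayeredPool(): walk the registered
|     // higher layers until one of them frees a connection.
|     bool CloseOneIdleConnectionInLayeredPool() {
|       for (std::set<LayeredPool*>::const_iterator it =
|                higher_layer_pools_.begin();
|            it != higher_layer_pools_.end(); ++it) {
|         if ((*it)->CloseOneIdleConnection())
|           return true;
|       }
|       return false;
|     }
|
|    private:
|     int idle_sockets_;
|     std::set<LayeredPool*> higher_layer_pools_;
|   };
|
|   // A higher layer: try our own idle sockets first, then recurse upward.
|   class HigherPool : public LayeredPool {
|    public:
|     explicit HigherPool(PoolHelper* lower) : lower_(lower) {
|       lower_->AddLayeredPool(this);
|     }
|     virtual ~HigherPool() { lower_->RemoveLayeredPool(this); }
|
|     virtual bool CloseOneIdleConnection() {
|       return helper_.CloseOneIdleSocket() ||
|              helper_.CloseOneIdleConnectionInLayeredPool();
|     }
|
|     PoolHelper* helper() { return &helper_; }
|
|    private:
|     PoolHelper* lower_;
|     PoolHelper helper_;  // this pool's own bookkeeping
|   };
|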
|
|
|
|