| Index: net/socket/client_socket_pool_base.cc |
| diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc |
| index 231254a5b7d5cb126920812afbeea606bdf7b2be..85da40ed91e353dad74e0108661790c54fe68b3c 100644 |
| --- a/net/socket/client_socket_pool_base.cc |
| +++ b/net/socket/client_socket_pool_base.cc |
| @@ -202,6 +202,7 @@ ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() { |
| DCHECK(group_map_.empty()); |
| DCHECK(pending_callback_map_.empty()); |
| DCHECK_EQ(0, connecting_socket_count_); |
| + DCHECK(higher_layer_pools_.empty()); |
| NetworkChangeNotifier::RemoveIPAddressObserver(this); |
| } |
| @@ -231,6 +232,18 @@ ClientSocketPoolBaseHelper::RemoveRequestFromQueue( |
| return req; |
| } |
| +void ClientSocketPoolBaseHelper::AddLayeredPool(LayeredPool* pool) { |
| + CHECK(pool); |
| + CHECK(!ContainsKey(higher_layer_pools_, pool)); |
| + higher_layer_pools_.insert(pool); |
| +} |
| + |
| +void ClientSocketPoolBaseHelper::RemoveLayeredPool(LayeredPool* pool) { |
| + CHECK(pool); |
| + CHECK(ContainsKey(higher_layer_pools_, pool)); |
| + higher_layer_pools_.erase(pool); |
| +} |
| + |
| int ClientSocketPoolBaseHelper::RequestSocket( |
| const std::string& group_name, |
| const Request* request) { |
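The AddLayeredPool()/RemoveLayeredPool() pair above, together with the new destructor DCHECK, amounts to a registration contract: a higher-layer pool registers itself with the lower-layer pool for its whole lifetime and must unregister before the lower pool goes away. A minimal standalone sketch of that contract, using hypothetical LowerPool/HigherPool classes and plain asserts rather than the actual Chromium types:

#include <cassert>
#include <set>

// Stand-in for the LayeredPool interface: a higher-layer pool that the lower
// pool can later ask to give a connection back.
class LayeredPool {
 public:
  virtual ~LayeredPool() {}
  // Returns true if an idle connection was closed.
  virtual bool CloseOneIdleConnection() = 0;
};

// Stand-in for ClientSocketPoolBaseHelper's registration bookkeeping.
class LowerPool {
 public:
  // Mirrors the new destructor DCHECK: every registered pool must have
  // unregistered by the time the lower pool is destroyed.
  ~LowerPool() { assert(higher_layer_pools_.empty()); }

  void AddLayeredPool(LayeredPool* pool) {
    assert(pool && higher_layer_pools_.count(pool) == 0);
    higher_layer_pools_.insert(pool);
  }
  void RemoveLayeredPool(LayeredPool* pool) {
    assert(pool && higher_layer_pools_.count(pool) == 1);
    higher_layer_pools_.erase(pool);
  }

 private:
  std::set<LayeredPool*> higher_layer_pools_;
};

// A higher-layer pool registers on construction and unregisters on
// destruction, so tearing the layers down top-down keeps the assert happy.
class HigherPool : public LayeredPool {
 public:
  explicit HigherPool(LowerPool* lower) : lower_(lower) {
    lower_->AddLayeredPool(this);
  }
  virtual ~HigherPool() { lower_->RemoveLayeredPool(this); }
  virtual bool CloseOneIdleConnection() { return false; }  // nothing idle here

 private:
  LowerPool* lower_;
};

int main() {
  LowerPool lower;
  {
    HigherPool higher(&lower);  // registers with |lower|
  }  // unregisters before |lower| is destroyed
  return 0;
}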
| @@ -321,6 +334,10 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal( |
| // Can we make another active socket now? |
| if (!group->HasAvailableSocketSlot(max_sockets_per_group_) && |
| !request->ignore_limits()) { |
| + // TODO(willchan): Consider whether or not we need to close a socket in a |
| + // higher layered group. I don't think this makes sense, since we would |
| + // just reuse that socket if we needed one and would never make it down to |
| + // this layer. |
| request->net_log().AddEvent( |
| NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL); |
| return ERR_IO_PENDING; |
| @@ -328,10 +345,13 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal( |
| if (ReachedMaxSocketsLimit() && !request->ignore_limits()) { |
| if (idle_socket_count() > 0) { |
| + // There's an idle socket in this pool. Either it's still in this group, |
| + // and we only got here because preconnecting bypasses idle sockets, or |
| + // it's in another group. |
| bool closed = CloseOneIdleSocketExceptInGroup(group); |
| if (preconnecting && !closed) |
| return ERR_PRECONNECT_MAX_SOCKET_LIMIT; |
| - } else { |
| + } else if (!CloseOneIdleConnectionInLayeredPool()) { |
|
mmenke 2011/10/27 18:52:37: Is this guaranteed to result in ReachedMaxSocketsL
willchan no longer on Chromium 2011/11/08 22:26:13: Sorry, I don't grok. Which path do you mean? Also
mmenke 2011/11/08 22:41:04: So you have a request with ignore_limits() set to
willchan no longer on Chromium 2011/11/15 18:44:06: GREAT catch. I'm impressed you caught this given y
|
| // We could check if we really have a stalled group here, but it requires |
| // a scan of all groups, so just flip a flag here, and do the check later. |
| request->net_log().AddEvent( |
| @@ -340,7 +360,8 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal( |
| } |
| } |
| - // We couldn't find a socket to reuse, so allocate and connect a new one. |
| + // We couldn't find a socket to reuse, and there's space to allocate one, |
| + // so allocate and connect a new one. |
| scoped_ptr<ConnectJob> connect_job( |
| connect_job_factory_->NewConnectJob(group_name, *request, this)); |
| @@ -761,18 +782,22 @@ void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() { |
| // are not at the |max_sockets_per_group_| limit. Note: for requests with |
| // the same priority, the winner is based on group hash ordering (and not |
| // insertion order). |
| -bool ClientSocketPoolBaseHelper::FindTopStalledGroup(Group** group, |
| - std::string* group_name) { |
| +bool ClientSocketPoolBaseHelper::FindTopStalledGroup( |
| + Group** group, |
| + std::string* group_name) const { |
| + CHECK((group && group_name) || (!group && !group_name)); |
| Group* top_group = NULL; |
| const std::string* top_group_name = NULL; |
| bool has_stalled_group = false; |
| - for (GroupMap::iterator i = group_map_.begin(); |
| + for (GroupMap::const_iterator i = group_map_.begin(); |
| i != group_map_.end(); ++i) { |
| Group* curr_group = i->second; |
| const RequestQueue& queue = curr_group->pending_requests(); |
| if (queue.empty()) |
| continue; |
| if (curr_group->IsStalled(max_sockets_per_group_)) { |
| + if (!group) |
| + return true; |
| has_stalled_group = true; |
| bool has_higher_priority = !top_group || |
| curr_group->TopPendingPriority() < top_group->TopPendingPriority(); |
| @@ -784,8 +809,11 @@ bool ClientSocketPoolBaseHelper::FindTopStalledGroup(Group** group, |
| } |
| if (top_group) { |
| + CHECK(group); |
| *group = top_group; |
| *group_name = *top_group_name; |
| + } else { |
| + CHECK(!has_stalled_group); |
| } |
| return has_stalled_group; |
| } |
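With the const qualifier and the NULL-argument early return added above, FindTopStalledGroup() now doubles as an existence check: callers that only need a yes/no answer (such as IsStalled() below) pass NULL for both out-parameters. A simplified, self-contained sketch of that calling convention, with a stand-in Group type and without the priority ordering of the real function:

#include <cassert>
#include <cstddef>
#include <map>
#include <string>

struct Group {
  bool stalled;
};

// Either both out-parameters are supplied, or both are NULL and the function
// only reports whether any stalled group exists.
bool FindTopStalledGroup(const std::map<std::string, Group>& groups,
                         const Group** group,
                         const std::string** group_name) {
  assert((group && group_name) || (!group && !group_name));
  bool has_stalled_group = false;
  for (std::map<std::string, Group>::const_iterator i = groups.begin();
       i != groups.end(); ++i) {
    if (!i->second.stalled)
      continue;
    if (!group)
      return true;  // Existence check: stop at the first stalled group.
    has_stalled_group = true;
    *group = &i->second;
    *group_name = &i->first;
  }
  return has_stalled_group;
}

int main() {
  std::map<std::string, Group> groups;
  groups["a"].stalled = false;
  groups["b"].stalled = true;

  // Existence check, the form IsStalled() needs.
  assert(FindTopStalledGroup(groups, NULL, NULL));

  // Full lookup.
  const Group* group = NULL;
  const std::string* name = NULL;
  assert(FindTopStalledGroup(groups, &group, &name));
  assert(*name == "b");
  return 0;
}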
| @@ -858,6 +886,18 @@ void ClientSocketPoolBaseHelper::Flush() { |
| AbortAllRequests(); |
| } |
| +bool ClientSocketPoolBaseHelper::IsStalled() const { |
| + CHECK_LE(handed_out_socket_count_ + connecting_socket_count_, max_sockets_); |
|
mmenke 2011/10/27 18:52:37: Given that requests have an "ignore_limits" flag,
willchan no longer on Chromium 2011/11/08 22:26:13: Good point!
|
| + if ((handed_out_socket_count_ + connecting_socket_count_) != max_sockets_) |
| + return false; |
| + for (GroupMap::const_iterator it = group_map_.begin(); |
| + it != group_map_.end(); ++it) { |
| + if (it->second->IsStalled(max_sockets_per_group_)) |
| + return true; |
| + } |
| + return false; |
| +} |
| + |
| void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job, |
| Group* group) { |
| CHECK_GT(connecting_socket_count_, 0); |
| @@ -995,8 +1035,10 @@ bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const { |
| return true; |
| } |
| -void ClientSocketPoolBaseHelper::CloseOneIdleSocket() { |
| - CloseOneIdleSocketExceptInGroup(NULL); |
| +bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() { |
| + if (idle_socket_count() == 0) |
| + return false; |
| + return CloseOneIdleSocketExceptInGroup(NULL); |
| } |
| bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup( |
| @@ -1020,9 +1062,18 @@ bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup( |
| } |
| } |
| - if (!exception_group) |
| - LOG(DFATAL) << "No idle socket found to close!."; |
| + return false; |
| +} |
| +bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInLayeredPool() { |
| + // This pool doesn't have any idle sockets. It's possible that a pool at a |
| + // higher layer is holding one of this layer's sockets active even though |
| + // it's actually idle. Query the higher layers. |
| + for (std::set<LayeredPool*>::const_iterator it = |
| + higher_layer_pools_.begin(); it != higher_layer_pools_.end(); ++it) { |
|
mmenke 2011/10/27 18:52:37: nit: Looks to me like higher_layer_pools_.begin()
willchan no longer on Chromium 2011/11/08 22:26:13: Done.
|
| + if ((*it)->CloseOneIdleConnection()) |
| + return true; |
| + } |
| return false; |
| } |
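Taken together, the new pieces let a lower-layer pool that has hit max_sockets_ and has no idle sockets of its own reclaim a socket from a layer above it: CloseOneIdleConnectionInLayeredPool() asks each registered LayeredPool to close one idle connection and stops at the first one that does. A simplified, self-contained sketch of that flow, with hypothetical LowerPool and IdleSessionPool classes standing in for the real pools:

#include <iostream>
#include <set>

class LayeredPool {
 public:
  virtual ~LayeredPool() {}
  virtual bool CloseOneIdleConnection() = 0;
};

class LowerPool {
 public:
  void AddLayeredPool(LayeredPool* pool) { higher_layer_pools_.insert(pool); }
  void RemoveLayeredPool(LayeredPool* pool) { higher_layer_pools_.erase(pool); }

  // Mirrors CloseOneIdleConnectionInLayeredPool(): returns true as soon as any
  // higher layer frees a connection, which frees a socket slot in this pool.
  bool CloseOneIdleConnectionInLayeredPool() {
    for (std::set<LayeredPool*>::const_iterator it = higher_layer_pools_.begin();
         it != higher_layer_pools_.end(); ++it) {
      if ((*it)->CloseOneIdleConnection())
        return true;
    }
    return false;
  }

 private:
  std::set<LayeredPool*> higher_layer_pools_;
};

// A higher layer that is holding one of the lower layer's sockets active even
// though it is actually idle, e.g. a cached session kept around for reuse.
class IdleSessionPool : public LayeredPool {
 public:
  IdleSessionPool() : idle_sessions_(1) {}
  virtual bool CloseOneIdleConnection() {
    if (idle_sessions_ == 0)
      return false;
    --idle_sessions_;  // Releasing the session returns its socket below.
    return true;
  }

 private:
  int idle_sessions_;
};

int main() {
  LowerPool lower;
  IdleSessionPool sessions;
  lower.AddLayeredPool(&sessions);

  // The first call frees the one idle session; the second finds nothing to
  // close, so the caller would fall through to the stalled path in
  // RequestSocketInternal().
  std::cout << lower.CloseOneIdleConnectionInLayeredPool() << "\n";  // 1
  std::cout << lower.CloseOneIdleConnectionInLayeredPool() << "\n";  // 0

  lower.RemoveLayeredPool(&sessions);
  return 0;
}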