| OLD | NEW |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/socket/client_socket_pool_base.h" | 5 #include "net/socket/client_socket_pool_base.h" |
| 6 | 6 |
| 7 #include <math.h> | 7 #include <math.h> |
| 8 #include "base/compiler_specific.h" | 8 #include "base/compiler_specific.h" |
| 9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
| 10 #include "base/logging.h" | 10 #include "base/logging.h" |
| (...skipping 189 matching lines...) |
| 200 } | 200 } |
| 201 | 201 |
| 202 ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() { | 202 ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() { |
| 203 // Clean up any idle sockets and pending connect jobs. Assert that we have no | 203 // Clean up any idle sockets and pending connect jobs. Assert that we have no |
| 204 // remaining active sockets or pending requests. They should have all been | 204 // remaining active sockets or pending requests. They should have all been |
| 205 // cleaned up prior to |this| being destroyed. | 205 // cleaned up prior to |this| being destroyed. |
| 206 Flush(); | 206 Flush(); |
| 207 DCHECK(group_map_.empty()); | 207 DCHECK(group_map_.empty()); |
| 208 DCHECK(pending_callback_map_.empty()); | 208 DCHECK(pending_callback_map_.empty()); |
| 209 DCHECK_EQ(0, connecting_socket_count_); | 209 DCHECK_EQ(0, connecting_socket_count_); |
| 210 DCHECK(higher_layer_pools_.empty()); | |
| 211 | 210 |
| 212 NetworkChangeNotifier::RemoveIPAddressObserver(this); | 211 NetworkChangeNotifier::RemoveIPAddressObserver(this); |
| 213 } | 212 } |
| 214 | 213 |
| 215 // InsertRequestIntoQueue inserts the request into the queue based on | 214 // InsertRequestIntoQueue inserts the request into the queue based on |
| 216 // priority. Highest priorities are closest to the front. Older requests are | 215 // priority. Highest priorities are closest to the front. Older requests are |
| 217 // prioritized over requests of equal priority. | 216 // prioritized over requests of equal priority. |
| 218 // | 217 // |
| 219 // static | 218 // static |
| 220 void ClientSocketPoolBaseHelper::InsertRequestIntoQueue( | 219 void ClientSocketPoolBaseHelper::InsertRequestIntoQueue( |
| 221 const Request* r, RequestQueue* pending_requests) { | 220 const Request* r, RequestQueue* pending_requests) { |
| 222 RequestQueue::iterator it = pending_requests->begin(); | 221 RequestQueue::iterator it = pending_requests->begin(); |
| 223 while (it != pending_requests->end() && r->priority() >= (*it)->priority()) | 222 while (it != pending_requests->end() && r->priority() >= (*it)->priority()) |
| 224 ++it; | 223 ++it; |
| 225 pending_requests->insert(it, r); | 224 pending_requests->insert(it, r); |
| 226 } | 225 } |
| 227 | 226 |
| 228 // static | 227 // static |
| 229 const ClientSocketPoolBaseHelper::Request* | 228 const ClientSocketPoolBaseHelper::Request* |
| 230 ClientSocketPoolBaseHelper::RemoveRequestFromQueue( | 229 ClientSocketPoolBaseHelper::RemoveRequestFromQueue( |
| 231 const RequestQueue::iterator& it, Group* group) { | 230 const RequestQueue::iterator& it, Group* group) { |
| 232 const Request* req = *it; | 231 const Request* req = *it; |
| 233 group->mutable_pending_requests()->erase(it); | 232 group->mutable_pending_requests()->erase(it); |
| 234 // If there are no more requests, we kill the backup timer. | 233 // If there are no more requests, we kill the backup timer. |
| 235 if (group->pending_requests().empty()) | 234 if (group->pending_requests().empty()) |
| 236 group->CleanupBackupJob(); | 235 group->CleanupBackupJob(); |
| 237 return req; | 236 return req; |
| 238 } | 237 } |
| 239 | 238 |
| 240 void ClientSocketPoolBaseHelper::AddLayeredPool(LayeredPool* pool) { | |
| 241 CHECK(pool); | |
| 242 CHECK(!ContainsKey(higher_layer_pools_, pool)); | |
| 243 higher_layer_pools_.insert(pool); | |
| 244 } | |
| 245 | |
| 246 void ClientSocketPoolBaseHelper::RemoveLayeredPool(LayeredPool* pool) { | |
| 247 CHECK(pool); | |
| 248 CHECK(ContainsKey(higher_layer_pools_, pool)); | |
| 249 higher_layer_pools_.erase(pool); | |
| 250 } | |
| 251 | |
| 252 int ClientSocketPoolBaseHelper::RequestSocket( | 239 int ClientSocketPoolBaseHelper::RequestSocket( |
| 253 const std::string& group_name, | 240 const std::string& group_name, |
| 254 const Request* request) { | 241 const Request* request) { |
| 255 CHECK(request->callback()); | 242 CHECK(request->callback()); |
| 256 CHECK(request->handle()); | 243 CHECK(request->handle()); |
| 257 | 244 |
| 258 // Clean up any timed-out idle sockets if no timer is used. | 245 // Clean up any timed-out idle sockets if no timer is used. |
| 259 if (!use_cleanup_timer_) | 246 if (!use_cleanup_timer_) |
| 260 CleanupIdleSockets(false); | 247 CleanupIdleSockets(false); |
| 261 | 248 |
| (...skipping 78 matching lines...) |
| 340 if (AssignIdleSocketToGroup(request, group)) | 327 if (AssignIdleSocketToGroup(request, group)) |
| 341 return OK; | 328 return OK; |
| 342 } | 329 } |
| 343 | 330 |
| 344 if (!preconnecting && group->TryToUsePreconnectConnectJob()) | 331 if (!preconnecting && group->TryToUsePreconnectConnectJob()) |
| 345 return ERR_IO_PENDING; | 332 return ERR_IO_PENDING; |
| 346 | 333 |
| 347 // Can we make another active socket now? | 334 // Can we make another active socket now? |
| 348 if (!group->HasAvailableSocketSlot(max_sockets_per_group_) && | 335 if (!group->HasAvailableSocketSlot(max_sockets_per_group_) && |
| 349 !request->ignore_limits()) { | 336 !request->ignore_limits()) { |
| 350 // TODO(willchan): Consider whether or not we need to close a socket in a | |
| 351 // higher layered group. I don't think this makes sense since we would just | |
| 352 // reuse that socket then if we needed one and wouldn't make it down to this | |
| 353 // layer. | |
| 354 request->net_log().AddEvent( | 337 request->net_log().AddEvent( |
| 355 NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL); | 338 NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL); |
| 356 return ERR_IO_PENDING; | 339 return ERR_IO_PENDING; |
| 357 } | 340 } |
| 358 | 341 |
| 359 if (ReachedMaxSocketsLimit() && !request->ignore_limits()) { | 342 if (ReachedMaxSocketsLimit() && !request->ignore_limits()) { |
| 360 if (idle_socket_count() > 0) { | 343 if (idle_socket_count() > 0) { |
| 361 // There's an idle socket in this pool. Either that's because there's | |
| 362 // still one in this group, but we got here due to preconnecting bypassing | |
| 363 // idle sockets, or because there's an idle socket in another group. | |
| 364 bool closed = CloseOneIdleSocketExceptInGroup(group); | 344 bool closed = CloseOneIdleSocketExceptInGroup(group); |
| 365 if (preconnecting && !closed) | 345 if (preconnecting && !closed) |
| 366 return ERR_PRECONNECT_MAX_SOCKET_LIMIT; | 346 return ERR_PRECONNECT_MAX_SOCKET_LIMIT; |
| 367 } else { | 347 } else { |
| 368 do { | 348 // We could check if we really have a stalled group here, but it requires |
| 369 if (!CloseOneIdleConnectionInLayeredPool()) { | 349 // a scan of all groups, so just flip a flag here, and do the check later. |
| 370 // We could check if we really have a stalled group here, but it | 350 request->net_log().AddEvent( |
| 371 // requires a scan of all groups, so just flip a flag here, and do | 351 NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL); |
| 372 // the check later. | 352 return ERR_IO_PENDING; |
| 373 request->net_log().AddEvent( | |
| 374 NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL); | |
| 375 return ERR_IO_PENDING; | |
| 376 } | |
| 377 } while (ReachedMaxSocketsLimit()); | |
| 378 } | 353 } |
| 379 } | 354 } |
| 380 | 355 |
| 381 // We couldn't find a socket to reuse, and there's space to allocate one, | 356 // We couldn't find a socket to reuse, so allocate and connect a new one. |
| 382 // so allocate and connect a new one. | |
| 383 scoped_ptr<ConnectJob> connect_job( | 357 scoped_ptr<ConnectJob> connect_job( |
| 384 connect_job_factory_->NewConnectJob(group_name, *request, this)); | 358 connect_job_factory_->NewConnectJob(group_name, *request, this)); |
| 385 | 359 |
| 386 connect_job->Initialize(preconnecting); | 360 connect_job->Initialize(preconnecting); |
| 387 int rv = connect_job->Connect(); | 361 int rv = connect_job->Connect(); |
| 388 if (rv == OK) { | 362 if (rv == OK) { |
| 389 LogBoundConnectJobToRequest(connect_job->net_log().source(), request); | 363 LogBoundConnectJobToRequest(connect_job->net_log().source(), request); |
| 390 if (!preconnecting) { | 364 if (!preconnecting) { |
| 391 HandOutSocket(connect_job->ReleaseSocket(), false /* not reused */, | 365 HandOutSocket(connect_job->ReleaseSocket(), false /* not reused */, |
| 392 handle, base::TimeDelta(), group, request->net_log()); | 366 handle, base::TimeDelta(), group, request->net_log()); |
| (...skipping 416 matching lines...) |
| 809 // its limit, may be left with other stalled groups that could be | 783 // its limit, may be left with other stalled groups that could be |
| 810 // woken. This isn't optimal, but there is no starvation, so to avoid | 784 // woken. This isn't optimal, but there is no starvation, so to avoid |
| 811 // the looping we leave it at this. | 785 // the looping we leave it at this. |
| 812 OnAvailableSocketSlot(top_group_name, top_group); | 786 OnAvailableSocketSlot(top_group_name, top_group); |
| 813 } | 787 } |
| 814 | 788 |
| 815 // Search for the highest priority pending request, amongst the groups that | 789 // Search for the highest priority pending request, amongst the groups that |
| 816 // are not at the |max_sockets_per_group_| limit. Note: for requests with | 790 // are not at the |max_sockets_per_group_| limit. Note: for requests with |
| 817 // the same priority, the winner is based on group hash ordering (and not | 791 // the same priority, the winner is based on group hash ordering (and not |
| 818 // insertion order). | 792 // insertion order). |
| 819 bool ClientSocketPoolBaseHelper::FindTopStalledGroup( | 793 bool ClientSocketPoolBaseHelper::FindTopStalledGroup(Group** group, |
| 820 Group** group, | 794 std::string* group_name) { |
| 821 std::string* group_name) const { | |
| 822 CHECK((group && group_name) || (!group && !group_name)); | |
| 823 Group* top_group = NULL; | 795 Group* top_group = NULL; |
| 824 const std::string* top_group_name = NULL; | 796 const std::string* top_group_name = NULL; |
| 825 bool has_stalled_group = false; | 797 bool has_stalled_group = false; |
| 826 for (GroupMap::const_iterator i = group_map_.begin(); | 798 for (GroupMap::iterator i = group_map_.begin(); |
| 827 i != group_map_.end(); ++i) { | 799 i != group_map_.end(); ++i) { |
| 828 Group* curr_group = i->second; | 800 Group* curr_group = i->second; |
| 829 const RequestQueue& queue = curr_group->pending_requests(); | 801 const RequestQueue& queue = curr_group->pending_requests(); |
| 830 if (queue.empty()) | 802 if (queue.empty()) |
| 831 continue; | 803 continue; |
| 832 if (curr_group->IsStalled(max_sockets_per_group_)) { | 804 if (curr_group->IsStalled(max_sockets_per_group_)) { |
| 833 if (!group) | |
| 834 return true; | |
| 835 has_stalled_group = true; | 805 has_stalled_group = true; |
| 836 bool has_higher_priority = !top_group || | 806 bool has_higher_priority = !top_group || |
| 837 curr_group->TopPendingPriority() < top_group->TopPendingPriority(); | 807 curr_group->TopPendingPriority() < top_group->TopPendingPriority(); |
| 838 if (has_higher_priority) { | 808 if (has_higher_priority) { |
| 839 top_group = curr_group; | 809 top_group = curr_group; |
| 840 top_group_name = &i->first; | 810 top_group_name = &i->first; |
| 841 } | 811 } |
| 842 } | 812 } |
| 843 } | 813 } |
| 844 | 814 |
| 845 if (top_group) { | 815 if (top_group) { |
| 846 CHECK(group); | |
| 847 *group = top_group; | 816 *group = top_group; |
| 848 *group_name = *top_group_name; | 817 *group_name = *top_group_name; |
| 849 } else { | |
| 850 CHECK(!has_stalled_group); | |
| 851 } | 818 } |
| 852 return has_stalled_group; | 819 return has_stalled_group; |
| 853 } | 820 } |
| 854 | 821 |
| 855 void ClientSocketPoolBaseHelper::OnConnectJobComplete( | 822 void ClientSocketPoolBaseHelper::OnConnectJobComplete( |
| 856 int result, ConnectJob* job) { | 823 int result, ConnectJob* job) { |
| 857 DCHECK_NE(ERR_IO_PENDING, result); | 824 DCHECK_NE(ERR_IO_PENDING, result); |
| 858 const std::string group_name = job->group_name(); | 825 const std::string group_name = job->group_name(); |
| 859 GroupMap::iterator group_it = group_map_.find(group_name); | 826 GroupMap::iterator group_it = group_map_.find(group_name); |
| 860 CHECK(group_it != group_map_.end()); | 827 CHECK(group_it != group_map_.end()); |
| (...skipping 52 matching lines...) |
| 913 Flush(); | 880 Flush(); |
| 914 } | 881 } |
| 915 | 882 |
| 916 void ClientSocketPoolBaseHelper::Flush() { | 883 void ClientSocketPoolBaseHelper::Flush() { |
| 917 pool_generation_number_++; | 884 pool_generation_number_++; |
| 918 CancelAllConnectJobs(); | 885 CancelAllConnectJobs(); |
| 919 CloseIdleSockets(); | 886 CloseIdleSockets(); |
| 920 AbortAllRequests(); | 887 AbortAllRequests(); |
| 921 } | 888 } |
| 922 | 889 |
| 923 bool ClientSocketPoolBaseHelper::IsStalled() const { | |
| 924 if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_) | |
| 925 return false; | |
| 926 for (GroupMap::const_iterator it = group_map_.begin(); | |
| 927 it != group_map_.end(); it++) { | |
| 928 if (it->second->IsStalled(max_sockets_per_group_)) | |
| 929 return true; | |
| 930 } | |
| 931 return false; | |
| 932 } | |
| 933 | |
| 934 void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job, | 890 void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job, |
| 935 Group* group) { | 891 Group* group) { |
| 936 CHECK_GT(connecting_socket_count_, 0); | 892 CHECK_GT(connecting_socket_count_, 0); |
| 937 connecting_socket_count_--; | 893 connecting_socket_count_--; |
| 938 | 894 |
| 939 DCHECK(group); | 895 DCHECK(group); |
| 940 DCHECK(ContainsKey(group->jobs(), job)); | 896 DCHECK(ContainsKey(group->jobs(), job)); |
| 941 group->RemoveJob(job); | 897 group->RemoveJob(job); |
| 942 | 898 |
| 943 // If we've got no more jobs for this group, then we no longer need a | 899 // If we've got no more jobs for this group, then we no longer need a |
| (...skipping 116 matching lines...) |
| 1060 // Each connecting socket will eventually connect and be handed out. | 1016 // Each connecting socket will eventually connect and be handed out. |
| 1061 int total = handed_out_socket_count_ + connecting_socket_count_ + | 1017 int total = handed_out_socket_count_ + connecting_socket_count_ + |
| 1062 idle_socket_count(); | 1018 idle_socket_count(); |
| 1063 // There can be more sockets than the limit since some requests can ignore | 1019 // There can be more sockets than the limit since some requests can ignore |
| 1064 // the limit. | 1020 // the limit. |
| 1065 if (total < max_sockets_) | 1021 if (total < max_sockets_) |
| 1066 return false; | 1022 return false; |
| 1067 return true; | 1023 return true; |
| 1068 } | 1024 } |
| 1069 | 1025 |
| 1070 bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() { | 1026 void ClientSocketPoolBaseHelper::CloseOneIdleSocket() { |
| 1071 if (idle_socket_count() == 0) | 1027 CloseOneIdleSocketExceptInGroup(NULL); |
| 1072 return false; | |
| 1073 return CloseOneIdleSocketExceptInGroup(NULL); | |
| 1074 } | 1028 } |
| 1075 | 1029 |
| 1076 bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup( | 1030 bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup( |
| 1077 const Group* exception_group) { | 1031 const Group* exception_group) { |
| 1078 CHECK_GT(idle_socket_count(), 0); | 1032 CHECK_GT(idle_socket_count(), 0); |
| 1079 | 1033 |
| 1080 for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) { | 1034 for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) { |
| 1081 Group* group = i->second; | 1035 Group* group = i->second; |
| 1082 if (exception_group == group) | 1036 if (exception_group == group) |
| 1083 continue; | 1037 continue; |
| 1084 std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets(); | 1038 std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets(); |
| 1085 | 1039 |
| 1086 if (!idle_sockets->empty()) { | 1040 if (!idle_sockets->empty()) { |
| 1087 delete idle_sockets->front().socket; | 1041 delete idle_sockets->front().socket; |
| 1088 idle_sockets->pop_front(); | 1042 idle_sockets->pop_front(); |
| 1089 DecrementIdleCount(); | 1043 DecrementIdleCount(); |
| 1090 if (group->IsEmpty()) | 1044 if (group->IsEmpty()) |
| 1091 RemoveGroup(i); | 1045 RemoveGroup(i); |
| 1092 | 1046 |
| 1093 return true; | 1047 return true; |
| 1094 } | 1048 } |
| 1095 } | 1049 } |
| 1096 | 1050 |
| | 1051 if (!exception_group) |
| | 1052 LOG(DFATAL) << "No idle socket found to close!"; |
| | 1053 |
| 1097 return false; | 1054 return false; |
| 1098 } | 1055 } |
| 1099 | 1056 |
| 1100 bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInLayeredPool() { | |
| 1101 // This pool doesn't have any idle sockets. It's possible that a pool at a | |
| 1102 // higher layer is holding one of this layer's sockets active, but it's actually idle. | |
| 1103 // Query the higher layers. | |
| 1104 for (std::set<LayeredPool*>::const_iterator it = higher_layer_pools_.begin(); | |
| 1105 it != higher_layer_pools_.end(); ++it) { | |
| 1106 if ((*it)->CloseOneIdleConnection()) | |
| 1107 return true; | |
| 1108 } | |
| 1109 return false; | |
| 1110 } | |
| 1111 | |
| 1112 void ClientSocketPoolBaseHelper::InvokeUserCallbackLater( | 1057 void ClientSocketPoolBaseHelper::InvokeUserCallbackLater( |
| 1113 ClientSocketHandle* handle, OldCompletionCallback* callback, int rv) { | 1058 ClientSocketHandle* handle, OldCompletionCallback* callback, int rv) { |
| 1114 CHECK(!ContainsKey(pending_callback_map_, handle)); | 1059 CHECK(!ContainsKey(pending_callback_map_, handle)); |
| 1115 pending_callback_map_[handle] = CallbackResultPair(callback, rv); | 1060 pending_callback_map_[handle] = CallbackResultPair(callback, rv); |
| 1116 MessageLoop::current()->PostTask( | 1061 MessageLoop::current()->PostTask( |
| 1117 FROM_HERE, | 1062 FROM_HERE, |
| 1118 method_factory_.NewRunnableMethod( | 1063 method_factory_.NewRunnableMethod( |
| 1119 &ClientSocketPoolBaseHelper::InvokeUserCallback, | 1064 &ClientSocketPoolBaseHelper::InvokeUserCallback, |
| 1120 handle)); | 1065 handle)); |
| 1121 } | 1066 } |
| (...skipping 84 matching lines...) |
| 1206 // Delete active jobs. | 1151 // Delete active jobs. |
| 1207 STLDeleteElements(&jobs_); | 1152 STLDeleteElements(&jobs_); |
| 1208 | 1153 |
| 1209 // Cancel pending backup job. | 1154 // Cancel pending backup job. |
| 1210 method_factory_.RevokeAll(); | 1155 method_factory_.RevokeAll(); |
| 1211 } | 1156 } |
| 1212 | 1157 |
| 1213 } // namespace internal | 1158 } // namespace internal |
| 1214 | 1159 |
| 1215 } // namespace net | 1160 } // namespace net |