| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "net/socket/client_socket_pool_base.h" | |
| 6 | |
| 7 #include "base/compiler_specific.h" | |
| 8 #include "base/format_macros.h" | |
| 9 #include "base/logging.h" | |
| 10 #include "base/message_loop/message_loop.h" | |
| 11 #include "base/profiler/scoped_tracker.h" | |
| 12 #include "base/stl_util.h" | |
| 13 #include "base/strings/string_util.h" | |
| 14 #include "base/time/time.h" | |
| 15 #include "base/values.h" | |
| 16 #include "net/base/net_errors.h" | |
| 17 #include "net/base/net_log.h" | |
| 18 | |
| 19 using base::TimeDelta; | |
| 20 | |
| 21 namespace net { | |
| 22 | |
| 23 namespace { | |
| 24 | |
| 25 // Indicates whether the idle socket cleanup timer is enabled. When the timer | |
| 26 // is disabled, idle sockets are closed the next time a socket request is made. | |
| 27 bool g_cleanup_timer_enabled = true; | |
| 28 | |
| 29 // The timeout value, in seconds, used to clean up idle sockets that can't be | |
| 30 // reused. | |
| 31 // | |
| 32 // Note: It's important to close idle sockets that have received data as soon | |
| 33 // as possible because the received data may cause BSOD on Windows XP under | |
| 34 // some conditions. See http://crbug.com/4606. | |
| 35 const int kCleanupInterval = 10; // DO NOT INCREASE THIS TIMEOUT. | |
| 36 | |
| 37 // Indicates whether a backup transport layer connection should be established | |
| 38 // after a certain timeout has passed without receiving an ACK. | |
| 39 bool g_connect_backup_jobs_enabled = true; | |
| 40 | |
| 41 } // namespace | |
| 42 | |
| 43 ConnectJob::ConnectJob(const std::string& group_name, | |
| 44 base::TimeDelta timeout_duration, | |
| 45 RequestPriority priority, | |
| 46 Delegate* delegate, | |
| 47 const BoundNetLog& net_log) | |
| 48 : group_name_(group_name), | |
| 49 timeout_duration_(timeout_duration), | |
| 50 priority_(priority), | |
| 51 delegate_(delegate), | |
| 52 net_log_(net_log), | |
| 53 idle_(true) { | |
| 54 DCHECK(!group_name.empty()); | |
| 55 DCHECK(delegate); | |
| 56 net_log.BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB, | |
| 57 NetLog::StringCallback("group_name", &group_name_)); | |
| 58 } | |
| 59 | |
| 60 ConnectJob::~ConnectJob() { | |
| 61 net_log().EndEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB); | |
| 62 } | |
| 63 | |
| 64 scoped_ptr<StreamSocket> ConnectJob::PassSocket() { | |
| 65 return socket_.Pass(); | |
| 66 } | |
| 67 | |
| 68 int ConnectJob::Connect() { | |
| 69 if (timeout_duration_ != base::TimeDelta()) | |
| 70 timer_.Start(FROM_HERE, timeout_duration_, this, &ConnectJob::OnTimeout); | |
| 71 | |
| 72 idle_ = false; | |
| 73 | |
| 74 LogConnectStart(); | |
| 75 | |
| 76 int rv = ConnectInternal(); | |
| 77 | |
| 78 if (rv != ERR_IO_PENDING) { | |
| 79 LogConnectCompletion(rv); | |
| 80 delegate_ = NULL; | |
| 81 } | |
| 82 | |
| 83 return rv; | |
| 84 } | |
| 85 | |
| 86 void ConnectJob::SetSocket(scoped_ptr<StreamSocket> socket) { | |
| 87 if (socket) { | |
| 88 net_log().AddEvent(NetLog::TYPE_CONNECT_JOB_SET_SOCKET, | |
| 89 socket->NetLog().source().ToEventParametersCallback()); | |
| 90 } | |
| 91 socket_ = socket.Pass(); | |
| 92 } | |
| 93 | |
| 94 void ConnectJob::NotifyDelegateOfCompletion(int rv) { | |
| 95 // The delegate will own |this|. | |
| 96 Delegate* delegate = delegate_; | |
| 97 delegate_ = NULL; | |
| 98 | |
| 99 LogConnectCompletion(rv); | |
| 100 delegate->OnConnectJobComplete(rv, this); | |
| 101 } | |
| 102 | |
| 103 void ConnectJob::ResetTimer(base::TimeDelta remaining_time) { | |
| 104 timer_.Stop(); | |
| 105 timer_.Start(FROM_HERE, remaining_time, this, &ConnectJob::OnTimeout); | |
| 106 } | |
| 107 | |
| 108 void ConnectJob::LogConnectStart() { | |
| 109 connect_timing_.connect_start = base::TimeTicks::Now(); | |
| 110 net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT); | |
| 111 } | |
| 112 | |
| 113 void ConnectJob::LogConnectCompletion(int net_error) { | |
| 114 connect_timing_.connect_end = base::TimeTicks::Now(); | |
| 115 net_log().EndEventWithNetErrorCode( | |
| 116 NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_CONNECT, net_error); | |
| 117 } | |
| 118 | |
| 119 void ConnectJob::OnTimeout() { | |
| 120 // Make sure the socket is NULL before calling into |delegate|. | |
| 121 SetSocket(scoped_ptr<StreamSocket>()); | |
| 122 | |
| 123 net_log_.AddEvent(NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_TIMED_OUT); | |
| 124 | |
| 125 NotifyDelegateOfCompletion(ERR_TIMED_OUT); | |
| 126 } | |
| 127 | |
| 128 namespace internal { | |
| 129 | |
| 130 ClientSocketPoolBaseHelper::Request::Request( | |
| 131 ClientSocketHandle* handle, | |
| 132 const CompletionCallback& callback, | |
| 133 RequestPriority priority, | |
| 134 bool ignore_limits, | |
| 135 Flags flags, | |
| 136 const BoundNetLog& net_log) | |
| 137 : handle_(handle), | |
| 138 callback_(callback), | |
| 139 priority_(priority), | |
| 140 ignore_limits_(ignore_limits), | |
| 141 flags_(flags), | |
| 142 net_log_(net_log) { | |
| 143 if (ignore_limits_) | |
| 144 DCHECK_EQ(priority_, MAXIMUM_PRIORITY); | |
| 145 } | |
| 146 | |
| 147 ClientSocketPoolBaseHelper::Request::~Request() {} | |
| 148 | |
| 149 ClientSocketPoolBaseHelper::ClientSocketPoolBaseHelper( | |
| 150 HigherLayeredPool* pool, | |
| 151 int max_sockets, | |
| 152 int max_sockets_per_group, | |
| 153 base::TimeDelta unused_idle_socket_timeout, | |
| 154 base::TimeDelta used_idle_socket_timeout, | |
| 155 ConnectJobFactory* connect_job_factory) | |
| 156 : idle_socket_count_(0), | |
| 157 connecting_socket_count_(0), | |
| 158 handed_out_socket_count_(0), | |
| 159 max_sockets_(max_sockets), | |
| 160 max_sockets_per_group_(max_sockets_per_group), | |
| 161 use_cleanup_timer_(g_cleanup_timer_enabled), | |
| 162 unused_idle_socket_timeout_(unused_idle_socket_timeout), | |
| 163 used_idle_socket_timeout_(used_idle_socket_timeout), | |
| 164 connect_job_factory_(connect_job_factory), | |
| 165 connect_backup_jobs_enabled_(false), | |
| 166 pool_generation_number_(0), | |
| 167 pool_(pool), | |
| 168 weak_factory_(this) { | |
| 169 DCHECK_LE(0, max_sockets_per_group); | |
| 170 DCHECK_LE(max_sockets_per_group, max_sockets); | |
| 171 | |
| 172 NetworkChangeNotifier::AddIPAddressObserver(this); | |
| 173 } | |
| 174 | |
| 175 ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() { | |
| 176 // Clean up any idle sockets and pending connect jobs. Assert that we have no | |
| 177 // remaining active sockets or pending requests. They should have all been | |
| 178 // cleaned up prior to |this| being destroyed. | |
| 179 FlushWithError(ERR_ABORTED); | |
| 180 DCHECK(group_map_.empty()); | |
| 181 DCHECK(pending_callback_map_.empty()); | |
| 182 DCHECK_EQ(0, connecting_socket_count_); | |
| 183 CHECK(higher_pools_.empty()); | |
| 184 | |
| 185 NetworkChangeNotifier::RemoveIPAddressObserver(this); | |
| 186 | |
| 187 // Remove from lower layer pools. | |
| 188 for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin(); | |
| 189 it != lower_pools_.end(); | |
| 190 ++it) { | |
| 191 (*it)->RemoveHigherLayeredPool(pool_); | |
| 192 } | |
| 193 } | |
| 194 | |
| 195 ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair() | |
| 196 : result(OK) { | |
| 197 } | |
| 198 | |
| 199 ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair( | |
| 200 const CompletionCallback& callback_in, int result_in) | |
| 201 : callback(callback_in), | |
| 202 result(result_in) { | |
| 203 } | |
| 204 | |
| 205 ClientSocketPoolBaseHelper::CallbackResultPair::~CallbackResultPair() {} | |
| 206 | |
| 207 bool ClientSocketPoolBaseHelper::IsStalled() const { | |
| 208 // If a lower layer pool is stalled, consider |this| stalled as well. | |
| 209 for (std::set<LowerLayeredPool*>::const_iterator it = lower_pools_.begin(); | |
| 210 it != lower_pools_.end(); | |
| 211 ++it) { | |
| 212 if ((*it)->IsStalled()) | |
| 213 return true; | |
| 214 } | |
| 215 | |
| 216 // If fewer than |max_sockets_| are in use, then clearly |this| is not | |
| 217 // stalled. | |
| 218 if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_) | |
| 219 return false; | |
| 220 // So in order to be stalled, |this| must be using at least |max_sockets_| AND | |
| 221 // |this| must have a request that is actually stalled on the global socket | |
| 222 // limit. To find such a request, look for a group that has more requests | |
| 223 // than jobs AND where the number of sockets is less than | |
| 224 // |max_sockets_per_group_|. (If the number of sockets is equal to | |
| 225 // |max_sockets_per_group_|, then the request is stalled on the group limit, | |
| 226 // which does not count.) | |
| 227 for (GroupMap::const_iterator it = group_map_.begin(); | |
| 228 it != group_map_.end(); ++it) { | |
| 229 if (it->second->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) | |
| 230 return true; | |
| 231 } | |
| 232 return false; | |
| 233 } | |
| 234 | |
| 235 void ClientSocketPoolBaseHelper::AddLowerLayeredPool( | |
| 236 LowerLayeredPool* lower_pool) { | |
| 237 DCHECK(pool_); | |
| 238 CHECK(!ContainsKey(lower_pools_, lower_pool)); | |
| 239 lower_pools_.insert(lower_pool); | |
| 240 lower_pool->AddHigherLayeredPool(pool_); | |
| 241 } | |
| 242 | |
| 243 void ClientSocketPoolBaseHelper::AddHigherLayeredPool( | |
| 244 HigherLayeredPool* higher_pool) { | |
| 245 CHECK(higher_pool); | |
| 246 CHECK(!ContainsKey(higher_pools_, higher_pool)); | |
| 247 higher_pools_.insert(higher_pool); | |
| 248 } | |
| 249 | |
| 250 void ClientSocketPoolBaseHelper::RemoveHigherLayeredPool( | |
| 251 HigherLayeredPool* higher_pool) { | |
| 252 CHECK(higher_pool); | |
| 253 CHECK(ContainsKey(higher_pools_, higher_pool)); | |
| 254 higher_pools_.erase(higher_pool); | |
| 255 } | |
| 256 | |
| 257 int ClientSocketPoolBaseHelper::RequestSocket( | |
| 258 const std::string& group_name, | |
| 259 scoped_ptr<const Request> request) { | |
| 260 CHECK(!request->callback().is_null()); | |
| 261 CHECK(request->handle()); | |
| 262 | |
| 263 // Clean up any timed-out idle sockets if the cleanup timer is not used. | |
| 264 if (!use_cleanup_timer_) | |
| 265 CleanupIdleSockets(false); | |
| 266 | |
| 267 request->net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL); | |
| 268 Group* group = GetOrCreateGroup(group_name); | |
| 269 | |
| 270 int rv = RequestSocketInternal(group_name, *request); | |
| 271 if (rv != ERR_IO_PENDING) { | |
| 272 request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv); | |
| 273 CHECK(!request->handle()->is_initialized()); | |
| 274 request.reset(); | |
| 275 } else { | |
| 276 group->InsertPendingRequest(request.Pass()); | |
| 277 // Have to do this asynchronously, as closing sockets in higher level pools | |
| 278 // calls back into |this|, which will cause all sorts of fun and exciting | |
| 279 // re-entrancy issues if the socket pool is doing something else at the | |
| 280 // time. | |
| 281 if (group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) { | |
| 282 base::MessageLoop::current()->PostTask( | |
| 283 FROM_HERE, | |
| 284 base::Bind( | |
| 285 &ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools, | |
| 286 weak_factory_.GetWeakPtr())); | |
| 287 } | |
| 288 } | |
| 289 return rv; | |
| 290 } | |
| 291 | |
| 292 void ClientSocketPoolBaseHelper::RequestSockets( | |
| 293 const std::string& group_name, | |
| 294 const Request& request, | |
| 295 int num_sockets) { | |
| 296 DCHECK(request.callback().is_null()); | |
| 297 DCHECK(!request.handle()); | |
| 298 | |
| 299 // Clean up any timed-out idle sockets if the cleanup timer is not used. | |
| 300 if (!use_cleanup_timer_) | |
| 301 CleanupIdleSockets(false); | |
| 302 | |
| 303 if (num_sockets > max_sockets_per_group_) { | |
| 304 num_sockets = max_sockets_per_group_; | |
| 305 } | |
| 306 | |
| 307 request.net_log().BeginEvent( | |
| 308 NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS, | |
| 309 NetLog::IntegerCallback("num_sockets", num_sockets)); | |
| 310 | |
| 311 Group* group = GetOrCreateGroup(group_name); | |
| 312 | |
| 313 // RequestSocketsInternal() may delete the group. | |
| 314 bool deleted_group = false; | |
| 315 | |
| 316 int rv = OK; | |
| 317 for (int num_iterations_left = num_sockets; | |
| 318 group->NumActiveSocketSlots() < num_sockets && | |
| 319 num_iterations_left > 0; num_iterations_left--) { | |
| 320 rv = RequestSocketInternal(group_name, request); | |
| 321 if (rv < 0 && rv != ERR_IO_PENDING) { | |
| 322 // We're encountering a synchronous error. Give up. | |
| 323 if (!ContainsKey(group_map_, group_name)) | |
| 324 deleted_group = true; | |
| 325 break; | |
| 326 } | |
| 327 if (!ContainsKey(group_map_, group_name)) { | |
| 328 // Unexpected. The group should only be getting deleted on synchronous | |
| 329 // error. | |
| 330 NOTREACHED(); | |
| 331 deleted_group = true; | |
| 332 break; | |
| 333 } | |
| 334 } | |
| 335 | |
| 336 if (!deleted_group && group->IsEmpty()) | |
| 337 RemoveGroup(group_name); | |
| 338 | |
| 339 if (rv == ERR_IO_PENDING) | |
| 340 rv = OK; | |
| 341 request.net_log().EndEventWithNetErrorCode( | |
| 342 NetLog::TYPE_SOCKET_POOL_CONNECTING_N_SOCKETS, rv); | |
| 343 } | |
| 344 | |
| 345 int ClientSocketPoolBaseHelper::RequestSocketInternal( | |
| 346 const std::string& group_name, | |
| 347 const Request& request) { | |
| 348 ClientSocketHandle* const handle = request.handle(); | |
| 349 const bool preconnecting = !handle; | |
| 350 Group* group = GetOrCreateGroup(group_name); | |
| 351 | |
| 352 if (!(request.flags() & NO_IDLE_SOCKETS)) { | |
| 353 // Try to reuse a socket. | |
| 354 if (AssignIdleSocketToRequest(request, group)) | |
| 355 return OK; | |
| 356 } | |
| 357 | |
| 358 // If there are more ConnectJobs than pending requests, don't need to do | |
| 359 // anything. Can just wait for the extra job to connect, and then assign it | |
| 360 // to the request. | |
| 361 if (!preconnecting && group->TryToUseUnassignedConnectJob()) | |
| 362 return ERR_IO_PENDING; | |
| 363 | |
| 364 // Can we make another active socket now? | |
| 365 if (!group->HasAvailableSocketSlot(max_sockets_per_group_) && | |
| 366 !request.ignore_limits()) { | |
| 367 // TODO(willchan): Consider whether or not we need to close a socket in a | |
| 368 // higher layered group. I don't think this makes sense since we would just | |
| 369 // reuse that socket then if we needed one and wouldn't make it down to this | |
| 370 // layer. | |
| 371 request.net_log().AddEvent( | |
| 372 NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP); | |
| 373 return ERR_IO_PENDING; | |
| 374 } | |
| 375 | |
| 376 if (ReachedMaxSocketsLimit() && !request.ignore_limits()) { | |
| 377 // NOTE(mmenke): Wonder if we really need different code for each case | |
| 378 // here. Only reason for them now seems to be preconnects. | |
| 379 if (idle_socket_count() > 0) { | |
| 380 // There's an idle socket in this pool. Either that's because there's | |
| 381 // still one in this group, but we got here due to preconnecting bypassing | |
| 382 // idle sockets, or because there's an idle socket in another group. | |
| 383 bool closed = CloseOneIdleSocketExceptInGroup(group); | |
| 384 if (preconnecting && !closed) | |
| 385 return ERR_PRECONNECT_MAX_SOCKET_LIMIT; | |
| 386 } else { | |
| 387 // We could check if we really have a stalled group here, but it requires | |
| 388 // a scan of all groups, so just flip a flag here, and do the check later. | |
| 389 request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS); | |
| 390 return ERR_IO_PENDING; | |
| 391 } | |
| 392 } | |
| 393 | |
| 394 // We couldn't find a socket to reuse, and there's space to allocate one, | |
| 395 // so allocate and connect a new one. | |
| 396 scoped_ptr<ConnectJob> connect_job( | |
| 397 connect_job_factory_->NewConnectJob(group_name, request, this)); | |
| 398 | |
| 399 int rv = connect_job->Connect(); | |
| 400 if (rv == OK) { | |
| 401 LogBoundConnectJobToRequest(connect_job->net_log().source(), request); | |
| 402 if (!preconnecting) { | |
| 403 HandOutSocket(connect_job->PassSocket(), ClientSocketHandle::UNUSED, | |
| 404 connect_job->connect_timing(), handle, base::TimeDelta(), | |
| 405 group, request.net_log()); | |
| 406 } else { | |
| 407 AddIdleSocket(connect_job->PassSocket(), group); | |
| 408 } | |
| 409 } else if (rv == ERR_IO_PENDING) { | |
| 410 // If we don't have any sockets in this group, set a timer for potentially | |
| 411 // creating a new one. If the SYN is lost, this backup socket may complete | |
| 412 // before the slow socket, improving end user latency. | |
| 413 if (connect_backup_jobs_enabled_ && group->IsEmpty()) { | |
| 414 group->StartBackupJobTimer(group_name, this); | |
| 415 } | |
| 416 | |
| 417 connecting_socket_count_++; | |
| 418 | |
| 419 group->AddJob(connect_job.Pass(), preconnecting); | |
| 420 } else { | |
| 421 LogBoundConnectJobToRequest(connect_job->net_log().source(), request); | |
| 422 scoped_ptr<StreamSocket> error_socket; | |
| 423 if (!preconnecting) { | |
| 424 DCHECK(handle); | |
| 425 connect_job->GetAdditionalErrorState(handle); | |
| 426 error_socket = connect_job->PassSocket(); | |
| 427 } | |
| 428 if (error_socket) { | |
| 429 HandOutSocket(error_socket.Pass(), ClientSocketHandle::UNUSED, | |
| 430 connect_job->connect_timing(), handle, base::TimeDelta(), | |
| 431 group, request.net_log()); | |
| 432 } else if (group->IsEmpty()) { | |
| 433 RemoveGroup(group_name); | |
| 434 } | |
| 435 } | |
| 436 | |
| 437 return rv; | |
| 438 } | |
| 439 | |
| 440 bool ClientSocketPoolBaseHelper::AssignIdleSocketToRequest( | |
| 441 const Request& request, Group* group) { | |
| 442 std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets(); | |
| 443 std::list<IdleSocket>::iterator idle_socket_it = idle_sockets->end(); | |
| 444 | |
| 445 // Iterate through the idle sockets forwards (oldest to newest): | |
| 446 // * Delete any disconnected ones. | |
| 447 // * If we find a used idle socket, assign it to |idle_socket_it|. At the end, | |
| 448 // |idle_socket_it| will point to the newest used idle socket. | |
| 449 for (std::list<IdleSocket>::iterator it = idle_sockets->begin(); | |
| 450 it != idle_sockets->end();) { | |
| 451 if (!it->IsUsable()) { | |
| 452 DecrementIdleCount(); | |
| 453 delete it->socket; | |
| 454 it = idle_sockets->erase(it); | |
| 455 continue; | |
| 456 } | |
| 457 | |
| 458 if (it->socket->WasEverUsed()) { | |
| 459 // We found one we can reuse! | |
| 460 idle_socket_it = it; | |
| 461 } | |
| 462 | |
| 463 ++it; | |
| 464 } | |
| 465 | |
| 466 // If we haven't found an idle socket, that means there are no used idle | |
| 467 // sockets. Pick the oldest (first) idle socket (FIFO). | |
| 468 | |
| 469 if (idle_socket_it == idle_sockets->end() && !idle_sockets->empty()) | |
| 470 idle_socket_it = idle_sockets->begin(); | |
| 471 | |
| 472 if (idle_socket_it != idle_sockets->end()) { | |
| 473 DecrementIdleCount(); | |
| 474 base::TimeDelta idle_time = | |
| 475 base::TimeTicks::Now() - idle_socket_it->start_time; | |
| 476 IdleSocket idle_socket = *idle_socket_it; | |
| 477 idle_sockets->erase(idle_socket_it); | |
| 478 // TODO(davidben): If |idle_time| is under some low watermark, consider | |
| 479 // treating as UNUSED rather than UNUSED_IDLE. This will avoid | |
| 480 // HttpNetworkTransaction retrying on some errors. | |
| 481 ClientSocketHandle::SocketReuseType reuse_type = | |
| 482 idle_socket.socket->WasEverUsed() ? | |
| 483 ClientSocketHandle::REUSED_IDLE : | |
| 484 ClientSocketHandle::UNUSED_IDLE; | |
| 485 HandOutSocket( | |
| 486 scoped_ptr<StreamSocket>(idle_socket.socket), | |
| 487 reuse_type, | |
| 488 LoadTimingInfo::ConnectTiming(), | |
| 489 request.handle(), | |
| 490 idle_time, | |
| 491 group, | |
| 492 request.net_log()); | |
| 493 return true; | |
| 494 } | |
| 495 | |
| 496 return false; | |
| 497 } | |
| 498 | |
| 499 // static | |
| 500 void ClientSocketPoolBaseHelper::LogBoundConnectJobToRequest( | |
| 501 const NetLog::Source& connect_job_source, const Request& request) { | |
| 502 request.net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB, | |
| 503 connect_job_source.ToEventParametersCallback()); | |
| 504 } | |
| 505 | |
| 506 void ClientSocketPoolBaseHelper::CancelRequest( | |
| 507 const std::string& group_name, ClientSocketHandle* handle) { | |
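| // If the request already has a completed ConnectJob whose callback is still | |
| // queued, drop the queued callback, take the socket back from the handle, and | |
| // return it to the pool (disconnecting it first if the job failed). | |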
| 508 PendingCallbackMap::iterator callback_it = pending_callback_map_.find(handle); | |
| 509 if (callback_it != pending_callback_map_.end()) { | |
| 510 int result = callback_it->second.result; | |
| 511 pending_callback_map_.erase(callback_it); | |
| 512 scoped_ptr<StreamSocket> socket = handle->PassSocket(); | |
| 513 if (socket) { | |
| 514 if (result != OK) | |
| 515 socket->Disconnect(); | |
| 516 ReleaseSocket(handle->group_name(), socket.Pass(), handle->id()); | |
| 517 } | |
| 518 return; | |
| 519 } | |
| 520 | |
| 521 CHECK(ContainsKey(group_map_, group_name)); | |
| 522 | |
| 523 Group* group = GetOrCreateGroup(group_name); | |
| 524 | |
| 525 // Search pending_requests for matching handle. | |
| 526 scoped_ptr<const Request> request = | |
| 527 group->FindAndRemovePendingRequest(handle); | |
| 528 if (request) { | |
| 529 request->net_log().AddEvent(NetLog::TYPE_CANCELLED); | |
| 530 request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL); | |
| 531 | |
| 532 // We let the job run, unless we're at the socket limit and there is | |
| 533 // not another request waiting on the job. | |
| 534 if (group->jobs().size() > group->pending_request_count() && | |
| 535 ReachedMaxSocketsLimit()) { | |
| 536 RemoveConnectJob(*group->jobs().begin(), group); | |
| 537 CheckForStalledSocketGroups(); | |
| 538 } | |
| 539 } | |
| 540 } | |
| 541 | |
| 542 bool ClientSocketPoolBaseHelper::HasGroup(const std::string& group_name) const { | |
| 543 return ContainsKey(group_map_, group_name); | |
| 544 } | |
| 545 | |
| 546 void ClientSocketPoolBaseHelper::CloseIdleSockets() { | |
| 547 CleanupIdleSockets(true); | |
| 548 DCHECK_EQ(0, idle_socket_count_); | |
| 549 } | |
| 550 | |
| 551 int ClientSocketPoolBaseHelper::IdleSocketCountInGroup( | |
| 552 const std::string& group_name) const { | |
| 553 GroupMap::const_iterator i = group_map_.find(group_name); | |
| 554 CHECK(i != group_map_.end()); | |
| 555 | |
| 556 return i->second->idle_sockets().size(); | |
| 557 } | |
| 558 | |
| 559 LoadState ClientSocketPoolBaseHelper::GetLoadState( | |
| 560 const std::string& group_name, | |
| 561 const ClientSocketHandle* handle) const { | |
| 562 if (ContainsKey(pending_callback_map_, handle)) | |
| 563 return LOAD_STATE_CONNECTING; | |
| 564 | |
| 565 if (!ContainsKey(group_map_, group_name)) { | |
| 566 NOTREACHED() << "ClientSocketPool does not contain group: " << group_name | |
| 567 << " for handle: " << handle; | |
| 568 return LOAD_STATE_IDLE; | |
| 569 } | |
| 570 | |
| 571 // Can't use operator[] since it is non-const. | |
| 572 const Group& group = *group_map_.find(group_name)->second; | |
| 573 | |
| 574 if (group.HasConnectJobForHandle(handle)) { | |
| 575 // Just return the state of the farthest along ConnectJob for the first | |
| 576 // group.jobs().size() pending requests. | |
| 577 LoadState max_state = LOAD_STATE_IDLE; | |
| 578 for (ConnectJobSet::const_iterator job_it = group.jobs().begin(); | |
| 579 job_it != group.jobs().end(); ++job_it) { | |
| 580 max_state = std::max(max_state, (*job_it)->GetLoadState()); | |
| 581 } | |
| 582 return max_state; | |
| 583 } | |
| 584 | |
| 585 if (group.IsStalledOnPoolMaxSockets(max_sockets_per_group_)) | |
| 586 return LOAD_STATE_WAITING_FOR_STALLED_SOCKET_POOL; | |
| 587 return LOAD_STATE_WAITING_FOR_AVAILABLE_SOCKET; | |
| 588 } | |
| 589 | |
| 590 base::DictionaryValue* ClientSocketPoolBaseHelper::GetInfoAsValue( | |
| 591 const std::string& name, const std::string& type) const { | |
| 592 base::DictionaryValue* dict = new base::DictionaryValue(); | |
| 593 dict->SetString("name", name); | |
| 594 dict->SetString("type", type); | |
| 595 dict->SetInteger("handed_out_socket_count", handed_out_socket_count_); | |
| 596 dict->SetInteger("connecting_socket_count", connecting_socket_count_); | |
| 597 dict->SetInteger("idle_socket_count", idle_socket_count_); | |
| 598 dict->SetInteger("max_socket_count", max_sockets_); | |
| 599 dict->SetInteger("max_sockets_per_group", max_sockets_per_group_); | |
| 600 dict->SetInteger("pool_generation_number", pool_generation_number_); | |
| 601 | |
| 602 if (group_map_.empty()) | |
| 603 return dict; | |
| 604 | |
| 605 base::DictionaryValue* all_groups_dict = new base::DictionaryValue(); | |
| 606 for (GroupMap::const_iterator it = group_map_.begin(); | |
| 607 it != group_map_.end(); it++) { | |
| 608 const Group* group = it->second; | |
| 609 base::DictionaryValue* group_dict = new base::DictionaryValue(); | |
| 610 | |
| 611 group_dict->SetInteger("pending_request_count", | |
| 612 group->pending_request_count()); | |
| 613 if (group->has_pending_requests()) { | |
| 614 group_dict->SetString( | |
| 615 "top_pending_priority", | |
| 616 RequestPriorityToString(group->TopPendingPriority())); | |
| 617 } | |
| 618 | |
| 619 group_dict->SetInteger("active_socket_count", group->active_socket_count()); | |
| 620 | |
| 621 base::ListValue* idle_socket_list = new base::ListValue(); | |
| 622 std::list<IdleSocket>::const_iterator idle_socket; | |
| 623 for (idle_socket = group->idle_sockets().begin(); | |
| 624 idle_socket != group->idle_sockets().end(); | |
| 625 idle_socket++) { | |
| 626 int source_id = idle_socket->socket->NetLog().source().id; | |
| 627 idle_socket_list->Append(new base::FundamentalValue(source_id)); | |
| 628 } | |
| 629 group_dict->Set("idle_sockets", idle_socket_list); | |
| 630 | |
| 631 base::ListValue* connect_jobs_list = new base::ListValue(); | |
| 632 std::set<ConnectJob*>::const_iterator job = group->jobs().begin(); | |
| 633 for (job = group->jobs().begin(); job != group->jobs().end(); job++) { | |
| 634 int source_id = (*job)->net_log().source().id; | |
| 635 connect_jobs_list->Append(new base::FundamentalValue(source_id)); | |
| 636 } | |
| 637 group_dict->Set("connect_jobs", connect_jobs_list); | |
| 638 | |
| 639 group_dict->SetBoolean("is_stalled", | |
| 640 group->IsStalledOnPoolMaxSockets( | |
| 641 max_sockets_per_group_)); | |
| 642 group_dict->SetBoolean("backup_job_timer_is_running", | |
| 643 group->BackupJobTimerIsRunning()); | |
| 644 | |
| 645 all_groups_dict->SetWithoutPathExpansion(it->first, group_dict); | |
| 646 } | |
| 647 dict->Set("groups", all_groups_dict); | |
| 648 return dict; | |
| 649 } | |
| 650 | |
| 651 bool ClientSocketPoolBaseHelper::IdleSocket::IsUsable() const { | |
| 652 if (socket->WasEverUsed()) | |
| 653 return socket->IsConnectedAndIdle(); | |
| 654 return socket->IsConnected(); | |
| 655 } | |
| 656 | |
| 657 bool ClientSocketPoolBaseHelper::IdleSocket::ShouldCleanup( | |
| 658 base::TimeTicks now, | |
| 659 base::TimeDelta timeout) const { | |
| 660 bool timed_out = (now - start_time) >= timeout; | |
| 661 if (timed_out) | |
| 662 return true; | |
| 663 return !IsUsable(); | |
| 664 } | |
| 665 | |
| 666 void ClientSocketPoolBaseHelper::CleanupIdleSockets(bool force) { | |
| 667 if (idle_socket_count_ == 0) | |
| 668 return; | |
| 669 | |
| 670 // Current time value. Retrieve it once at the start of the function rather | |
| 671 // than inside the inner loop, since it shouldn't change by a meaningful amount. | |
| 672 base::TimeTicks now = base::TimeTicks::Now(); | |
| 673 | |
| 674 GroupMap::iterator i = group_map_.begin(); | |
| 675 while (i != group_map_.end()) { | |
| 676 Group* group = i->second; | |
| 677 | |
| 678 std::list<IdleSocket>::iterator j = group->mutable_idle_sockets()->begin(); | |
| 679 while (j != group->idle_sockets().end()) { | |
| 680 base::TimeDelta timeout = | |
| 681 j->socket->WasEverUsed() ? | |
| 682 used_idle_socket_timeout_ : unused_idle_socket_timeout_; | |
| 683 if (force || j->ShouldCleanup(now, timeout)) { | |
| 684 delete j->socket; | |
| 685 j = group->mutable_idle_sockets()->erase(j); | |
| 686 DecrementIdleCount(); | |
| 687 } else { | |
| 688 ++j; | |
| 689 } | |
| 690 } | |
| 691 | |
| 692 // Delete group if no longer needed. | |
| 693 if (group->IsEmpty()) { | |
| 694 RemoveGroup(i++); | |
| 695 } else { | |
| 696 ++i; | |
| 697 } | |
| 698 } | |
| 699 } | |
| 700 | |
| 701 ClientSocketPoolBaseHelper::Group* ClientSocketPoolBaseHelper::GetOrCreateGroup( | |
| 702 const std::string& group_name) { | |
| 703 GroupMap::iterator it = group_map_.find(group_name); | |
| 704 if (it != group_map_.end()) | |
| 705 return it->second; | |
| 706 Group* group = new Group; | |
| 707 group_map_[group_name] = group; | |
| 708 return group; | |
| 709 } | |
| 710 | |
| 711 void ClientSocketPoolBaseHelper::RemoveGroup(const std::string& group_name) { | |
| 712 GroupMap::iterator it = group_map_.find(group_name); | |
| 713 CHECK(it != group_map_.end()); | |
| 714 | |
| 715 RemoveGroup(it); | |
| 716 } | |
| 717 | |
| 718 void ClientSocketPoolBaseHelper::RemoveGroup(GroupMap::iterator it) { | |
| 719 delete it->second; | |
| 720 group_map_.erase(it); | |
| 721 } | |
| 722 | |
| 723 // static | |
| 724 bool ClientSocketPoolBaseHelper::connect_backup_jobs_enabled() { | |
| 725 return g_connect_backup_jobs_enabled; | |
| 726 } | |
| 727 | |
| 728 // static | |
| 729 bool ClientSocketPoolBaseHelper::set_connect_backup_jobs_enabled(bool enabled) { | |
| 730 bool old_value = g_connect_backup_jobs_enabled; | |
| 731 g_connect_backup_jobs_enabled = enabled; | |
| 732 return old_value; | |
| 733 } | |
| 734 | |
| 735 void ClientSocketPoolBaseHelper::EnableConnectBackupJobs() { | |
| 736 connect_backup_jobs_enabled_ = g_connect_backup_jobs_enabled; | |
| 737 } | |
| 738 | |
| 739 void ClientSocketPoolBaseHelper::IncrementIdleCount() { | |
| 740 if (++idle_socket_count_ == 1 && use_cleanup_timer_) | |
| 741 StartIdleSocketTimer(); | |
| 742 } | |
| 743 | |
| 744 void ClientSocketPoolBaseHelper::DecrementIdleCount() { | |
| 745 if (--idle_socket_count_ == 0) | |
| 746 timer_.Stop(); | |
| 747 } | |
| 748 | |
| 749 // static | |
| 750 bool ClientSocketPoolBaseHelper::cleanup_timer_enabled() { | |
| 751 return g_cleanup_timer_enabled; | |
| 752 } | |
| 753 | |
| 754 // static | |
| 755 bool ClientSocketPoolBaseHelper::set_cleanup_timer_enabled(bool enabled) { | |
| 756 bool old_value = g_cleanup_timer_enabled; | |
| 757 g_cleanup_timer_enabled = enabled; | |
| 758 return old_value; | |
| 759 } | |
| 760 | |
| 761 void ClientSocketPoolBaseHelper::StartIdleSocketTimer() { | |
| 762 timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kCleanupInterval), this, | |
| 763 &ClientSocketPoolBaseHelper::OnCleanupTimerFired); | |
| 764 } | |
| 765 | |
| 766 void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name, | |
| 767 scoped_ptr<StreamSocket> socket, | |
| 768 int id) { | |
| 769 GroupMap::iterator i = group_map_.find(group_name); | |
| 770 CHECK(i != group_map_.end()); | |
| 771 | |
| 772 Group* group = i->second; | |
| 773 | |
| 774 CHECK_GT(handed_out_socket_count_, 0); | |
| 775 handed_out_socket_count_--; | |
| 776 | |
| 777 CHECK_GT(group->active_socket_count(), 0); | |
| 778 group->DecrementActiveSocketCount(); | |
| 779 | |
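| // A socket may only be returned to the idle list if it is still connected and | |
| // belongs to the current pool generation: |id| was stamped with | |
| // |pool_generation_number_| when the socket was handed out, so a mismatch | |
| // means the pool has been flushed (e.g. on an IP address change) since then. | |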
| 780 const bool can_reuse = socket->IsConnectedAndIdle() && | |
| 781 id == pool_generation_number_; | |
| 782 if (can_reuse) { | |
| 783 // Add it to the idle list. | |
| 784 AddIdleSocket(socket.Pass(), group); | |
| 785 OnAvailableSocketSlot(group_name, group); | |
| 786 } else { | |
| 787 socket.reset(); | |
| 788 } | |
| 789 | |
| 790 CheckForStalledSocketGroups(); | |
| 791 } | |
| 792 | |
| 793 void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() { | |
| 794 // If we have idle sockets, see if we can give one to the top-stalled group. | |
| 795 std::string top_group_name; | |
| 796 Group* top_group = NULL; | |
| 797 if (!FindTopStalledGroup(&top_group, &top_group_name)) { | |
| 798 // There may still be a stalled group in a lower level pool. | |
| 799 for (std::set<LowerLayeredPool*>::iterator it = lower_pools_.begin(); | |
| 800 it != lower_pools_.end(); | |
| 801 ++it) { | |
| 802 if ((*it)->IsStalled()) { | |
| 803 CloseOneIdleSocket(); | |
| 804 break; | |
| 805 } | |
| 806 } | |
| 807 return; | |
| 808 } | |
| 809 | |
| 810 if (ReachedMaxSocketsLimit()) { | |
| 811 if (idle_socket_count() > 0) { | |
| 812 CloseOneIdleSocket(); | |
| 813 } else { | |
| 814 // We can't activate more sockets since we're already at our global | |
| 815 // limit. | |
| 816 return; | |
| 817 } | |
| 818 } | |
| 819 | |
| 820 // Note: we don't loop on waking stalled groups. If the stalled group is at | |
| 821 // its limit, we may be left with other stalled groups that could be | |
| 822 // woken. This isn't optimal, but there is no starvation, so to avoid | |
| 823 // the looping we leave it at this. | |
| 824 OnAvailableSocketSlot(top_group_name, top_group); | |
| 825 } | |
| 826 | |
| 827 // Search for the highest priority pending request, amongst the groups that | |
| 828 // are not at the |max_sockets_per_group_| limit. Note: for requests with | |
| 829 // the same priority, the winner is based on group hash ordering (and not | |
| 830 // insertion order). | |
| 831 bool ClientSocketPoolBaseHelper::FindTopStalledGroup( | |
| 832 Group** group, | |
| 833 std::string* group_name) const { | |
| 834 CHECK((group && group_name) || (!group && !group_name)); | |
| 835 Group* top_group = NULL; | |
| 836 const std::string* top_group_name = NULL; | |
| 837 bool has_stalled_group = false; | |
| 838 for (GroupMap::const_iterator i = group_map_.begin(); | |
| 839 i != group_map_.end(); ++i) { | |
| 840 Group* curr_group = i->second; | |
| 841 if (!curr_group->has_pending_requests()) | |
| 842 continue; | |
| 843 if (curr_group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) { | |
| 844 if (!group) | |
| 845 return true; | |
| 846 has_stalled_group = true; | |
| 847 bool has_higher_priority = !top_group || | |
| 848 curr_group->TopPendingPriority() > top_group->TopPendingPriority(); | |
| 849 if (has_higher_priority) { | |
| 850 top_group = curr_group; | |
| 851 top_group_name = &i->first; | |
| 852 } | |
| 853 } | |
| 854 } | |
| 855 | |
| 856 if (top_group) { | |
| 857 CHECK(group); | |
| 858 *group = top_group; | |
| 859 *group_name = *top_group_name; | |
| 860 } else { | |
| 861 CHECK(!has_stalled_group); | |
| 862 } | |
| 863 return has_stalled_group; | |
| 864 } | |
| 865 | |
| 866 void ClientSocketPoolBaseHelper::OnConnectJobComplete( | |
| 867 int result, ConnectJob* job) { | |
| 868 // TODO(vadimt): Remove ScopedTracker below once crbug.com/436634 is fixed. | |
| 869 tracked_objects::ScopedTracker tracking_profile( | |
| 870 FROM_HERE_WITH_EXPLICIT_FUNCTION( | |
| 871 "436634 ClientSocketPoolBaseHelper::OnConnectJobComplete")); | |
| 872 | |
| 873 DCHECK_NE(ERR_IO_PENDING, result); | |
| 874 const std::string group_name = job->group_name(); | |
| 875 GroupMap::iterator group_it = group_map_.find(group_name); | |
| 876 CHECK(group_it != group_map_.end()); | |
| 877 Group* group = group_it->second; | |
| 878 | |
| 879 scoped_ptr<StreamSocket> socket = job->PassSocket(); | |
| 880 | |
| 881 // Copies of these are needed because |job| may be deleted before they are | |
| 882 // accessed. | |
| 883 BoundNetLog job_log = job->net_log(); | |
| 884 LoadTimingInfo::ConnectTiming connect_timing = job->connect_timing(); | |
| 885 | |
| 886 // RemoveConnectJob(job, _) must be called by all branches below; | |
| 887 // otherwise, |job| will be leaked. | |
| 888 | |
| 889 if (result == OK) { | |
| 890 DCHECK(socket.get()); | |
| 891 RemoveConnectJob(job, group); | |
| 892 scoped_ptr<const Request> request = group->PopNextPendingRequest(); | |
| 893 if (request) { | |
| 894 LogBoundConnectJobToRequest(job_log.source(), *request); | |
| 895 HandOutSocket( | |
| 896 socket.Pass(), ClientSocketHandle::UNUSED, connect_timing, | |
| 897 request->handle(), base::TimeDelta(), group, request->net_log()); | |
| 898 request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL); | |
| 899 InvokeUserCallbackLater(request->handle(), request->callback(), result); | |
| 900 } else { | |
| 901 AddIdleSocket(socket.Pass(), group); | |
| 902 OnAvailableSocketSlot(group_name, group); | |
| 903 CheckForStalledSocketGroups(); | |
| 904 } | |
| 905 } else { | |
| 906 // If we got a socket, it must contain error information so pass that | |
| 907 // up so that the caller can retrieve it. | |
| 908 bool handed_out_socket = false; | |
| 909 scoped_ptr<const Request> request = group->PopNextPendingRequest(); | |
| 910 if (request) { | |
| 911 LogBoundConnectJobToRequest(job_log.source(), *request); | |
| 912 job->GetAdditionalErrorState(request->handle()); | |
| 913 RemoveConnectJob(job, group); | |
| 914 if (socket.get()) { | |
| 915 handed_out_socket = true; | |
| 916 HandOutSocket(socket.Pass(), ClientSocketHandle::UNUSED, | |
| 917 connect_timing, request->handle(), base::TimeDelta(), | |
| 918 group, request->net_log()); | |
| 919 } | |
| 920 request->net_log().EndEventWithNetErrorCode( | |
| 921 NetLog::TYPE_SOCKET_POOL, result); | |
| 922 InvokeUserCallbackLater(request->handle(), request->callback(), result); | |
| 923 } else { | |
| 924 RemoveConnectJob(job, group); | |
| 925 } | |
| 926 if (!handed_out_socket) { | |
| 927 OnAvailableSocketSlot(group_name, group); | |
| 928 CheckForStalledSocketGroups(); | |
| 929 } | |
| 930 } | |
| 931 } | |
| 932 | |
| 933 void ClientSocketPoolBaseHelper::OnIPAddressChanged() { | |
| 934 FlushWithError(ERR_NETWORK_CHANGED); | |
| 935 } | |
| 936 | |
| 937 void ClientSocketPoolBaseHelper::FlushWithError(int error) { | |
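| // Bumping the generation number ensures that sockets handed out before the | |
| // flush are not returned to the idle list when they are released. | |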
| 938 pool_generation_number_++; | |
| 939 CancelAllConnectJobs(); | |
| 940 CloseIdleSockets(); | |
| 941 CancelAllRequestsWithError(error); | |
| 942 } | |
| 943 | |
| 944 void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job, | |
| 945 Group* group) { | |
| 946 CHECK_GT(connecting_socket_count_, 0); | |
| 947 connecting_socket_count_--; | |
| 948 | |
| 949 DCHECK(group); | |
| 950 group->RemoveJob(job); | |
| 951 } | |
| 952 | |
| 953 void ClientSocketPoolBaseHelper::OnAvailableSocketSlot( | |
| 954 const std::string& group_name, Group* group) { | |
| 955 DCHECK(ContainsKey(group_map_, group_name)); | |
| 956 if (group->IsEmpty()) { | |
| 957 RemoveGroup(group_name); | |
| 958 } else if (group->has_pending_requests()) { | |
| 959 ProcessPendingRequest(group_name, group); | |
| 960 } | |
| 961 } | |
| 962 | |
| 963 void ClientSocketPoolBaseHelper::ProcessPendingRequest( | |
| 964 const std::string& group_name, Group* group) { | |
| 965 const Request* next_request = group->GetNextPendingRequest(); | |
| 966 DCHECK(next_request); | |
| 967 int rv = RequestSocketInternal(group_name, *next_request); | |
| 968 if (rv != ERR_IO_PENDING) { | |
| 969 scoped_ptr<const Request> request = group->PopNextPendingRequest(); | |
| 970 DCHECK(request); | |
| 971 if (group->IsEmpty()) | |
| 972 RemoveGroup(group_name); | |
| 973 | |
| 974 request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv); | |
| 975 InvokeUserCallbackLater(request->handle(), request->callback(), rv); | |
| 976 } | |
| 977 } | |
| 978 | |
| 979 void ClientSocketPoolBaseHelper::HandOutSocket( | |
| 980 scoped_ptr<StreamSocket> socket, | |
| 981 ClientSocketHandle::SocketReuseType reuse_type, | |
| 982 const LoadTimingInfo::ConnectTiming& connect_timing, | |
| 983 ClientSocketHandle* handle, | |
| 984 base::TimeDelta idle_time, | |
| 985 Group* group, | |
| 986 const BoundNetLog& net_log) { | |
| 987 DCHECK(socket); | |
| 988 handle->SetSocket(socket.Pass()); | |
| 989 handle->set_reuse_type(reuse_type); | |
| 990 handle->set_idle_time(idle_time); | |
| 991 handle->set_pool_id(pool_generation_number_); | |
| 992 handle->set_connect_timing(connect_timing); | |
| 993 | |
| 994 if (handle->is_reused()) { | |
| 995 net_log.AddEvent( | |
| 996 NetLog::TYPE_SOCKET_POOL_REUSED_AN_EXISTING_SOCKET, | |
| 997 NetLog::IntegerCallback( | |
| 998 "idle_ms", static_cast<int>(idle_time.InMilliseconds()))); | |
| 999 } | |
| 1000 | |
| 1001 net_log.AddEvent( | |
| 1002 NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET, | |
| 1003 handle->socket()->NetLog().source().ToEventParametersCallback()); | |
| 1004 | |
| 1005 handed_out_socket_count_++; | |
| 1006 group->IncrementActiveSocketCount(); | |
| 1007 } | |
| 1008 | |
| 1009 void ClientSocketPoolBaseHelper::AddIdleSocket( | |
| 1010 scoped_ptr<StreamSocket> socket, | |
| 1011 Group* group) { | |
| 1012 DCHECK(socket); | |
| 1013 IdleSocket idle_socket; | |
| 1014 idle_socket.socket = socket.release(); | |
| 1015 idle_socket.start_time = base::TimeTicks::Now(); | |
| 1016 | |
| 1017 group->mutable_idle_sockets()->push_back(idle_socket); | |
| 1018 IncrementIdleCount(); | |
| 1019 } | |
| 1020 | |
| 1021 void ClientSocketPoolBaseHelper::CancelAllConnectJobs() { | |
| 1022 for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) { | |
| 1023 Group* group = i->second; | |
| 1024 connecting_socket_count_ -= group->jobs().size(); | |
| 1025 group->RemoveAllJobs(); | |
| 1026 | |
| 1027 // Delete group if no longer needed. | |
| 1028 if (group->IsEmpty()) { | |
| 1029 // RemoveGroup() will call .erase() which will invalidate the iterator, | |
| 1030 // but i will already have been incremented to a valid iterator before | |
| 1031 // RemoveGroup() is called. | |
| 1032 RemoveGroup(i++); | |
| 1033 } else { | |
| 1034 ++i; | |
| 1035 } | |
| 1036 } | |
| 1037 DCHECK_EQ(0, connecting_socket_count_); | |
| 1038 } | |
| 1039 | |
| 1040 void ClientSocketPoolBaseHelper::CancelAllRequestsWithError(int error) { | |
| 1041 for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) { | |
| 1042 Group* group = i->second; | |
| 1043 | |
| 1044 while (true) { | |
| 1045 scoped_ptr<const Request> request = group->PopNextPendingRequest(); | |
| 1046 if (!request) | |
| 1047 break; | |
| 1048 InvokeUserCallbackLater(request->handle(), request->callback(), error); | |
| 1049 } | |
| 1050 | |
| 1051 // Delete group if no longer needed. | |
| 1052 if (group->IsEmpty()) { | |
| 1053 // RemoveGroup() will call .erase() which will invalidate the iterator, | |
| 1054 // but i will already have been incremented to a valid iterator before | |
| 1055 // RemoveGroup() is called. | |
| 1056 RemoveGroup(i++); | |
| 1057 } else { | |
| 1058 ++i; | |
| 1059 } | |
| 1060 } | |
| 1061 } | |
| 1062 | |
| 1063 bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const { | |
| 1064 // Each connecting socket will eventually connect and be handed out. | |
| 1065 int total = handed_out_socket_count_ + connecting_socket_count_ + | |
| 1066 idle_socket_count(); | |
| 1067 // There can be more sockets than the limit since some requests can ignore | |
| 1068 // the limit. | |
| 1069 if (total < max_sockets_) | |
| 1070 return false; | |
| 1071 return true; | |
| 1072 } | |
| 1073 | |
| 1074 bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() { | |
| 1075 if (idle_socket_count() == 0) | |
| 1076 return false; | |
| 1077 return CloseOneIdleSocketExceptInGroup(NULL); | |
| 1078 } | |
| 1079 | |
| 1080 bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup( | |
| 1081 const Group* exception_group) { | |
| 1082 CHECK_GT(idle_socket_count(), 0); | |
| 1083 | |
| 1084 for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) { | |
| 1085 Group* group = i->second; | |
| 1086 if (exception_group == group) | |
| 1087 continue; | |
| 1088 std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets(); | |
| 1089 | |
| 1090 if (!idle_sockets->empty()) { | |
| 1091 delete idle_sockets->front().socket; | |
| 1092 idle_sockets->pop_front(); | |
| 1093 DecrementIdleCount(); | |
| 1094 if (group->IsEmpty()) | |
| 1095 RemoveGroup(i); | |
| 1096 | |
| 1097 return true; | |
| 1098 } | |
| 1099 } | |
| 1100 | |
| 1101 return false; | |
| 1102 } | |
| 1103 | |
| 1104 bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInHigherLayeredPool() { | |
| 1105 // This pool doesn't have any idle sockets. It's possible that a pool at a | |
| 1106 // higher layer is holding one of this pool's sockets active, but it's | |
| 1107 // actually idle. Query the higher layers. | |
| 1108 for (std::set<HigherLayeredPool*>::const_iterator it = higher_pools_.begin(); | |
| 1109 it != higher_pools_.end(); ++it) { | |
| 1110 if ((*it)->CloseOneIdleConnection()) | |
| 1111 return true; | |
| 1112 } | |
| 1113 return false; | |
| 1114 } | |
| 1115 | |
| 1116 void ClientSocketPoolBaseHelper::InvokeUserCallbackLater( | |
| 1117 ClientSocketHandle* handle, const CompletionCallback& callback, int rv) { | |
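| // The callback is stashed in |pending_callback_map_| and run from a posted | |
| // task rather than invoked synchronously, so user code is never re-entered | |
| // from inside a pool method. | |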
| 1118 CHECK(!ContainsKey(pending_callback_map_, handle)); | |
| 1119 pending_callback_map_[handle] = CallbackResultPair(callback, rv); | |
| 1120 base::MessageLoop::current()->PostTask( | |
| 1121 FROM_HERE, | |
| 1122 base::Bind(&ClientSocketPoolBaseHelper::InvokeUserCallback, | |
| 1123 weak_factory_.GetWeakPtr(), handle)); | |
| 1124 } | |
| 1125 | |
| 1126 void ClientSocketPoolBaseHelper::InvokeUserCallback( | |
| 1127 ClientSocketHandle* handle) { | |
| 1128 // TODO(pkasting): Remove ScopedTracker below once crbug.com/455884 is fixed. | |
| 1129 tracked_objects::ScopedTracker tracking_profile( | |
| 1130 FROM_HERE_WITH_EXPLICIT_FUNCTION( | |
| 1131 "455884 ClientSocketPoolBaseHelper::InvokeUserCallback")); | |
| 1132 PendingCallbackMap::iterator it = pending_callback_map_.find(handle); | |
| 1133 | |
| 1134 // Exit if the request has already been cancelled. | |
| 1135 if (it == pending_callback_map_.end()) | |
| 1136 return; | |
| 1137 | |
| 1138 CHECK(!handle->is_initialized()); | |
| 1139 CompletionCallback callback = it->second.callback; | |
| 1140 int result = it->second.result; | |
| 1141 pending_callback_map_.erase(it); | |
| 1142 callback.Run(result); | |
| 1143 } | |
| 1144 | |
| 1145 void ClientSocketPoolBaseHelper::TryToCloseSocketsInLayeredPools() { | |
| 1146 while (IsStalled()) { | |
| 1147 // Closing a socket will result in calling back into |this| to use the freed | |
| 1148 // socket slot, so nothing else is needed. | |
| 1149 if (!CloseOneIdleConnectionInHigherLayeredPool()) | |
| 1150 return; | |
| 1151 } | |
| 1152 } | |
| 1153 | |
| 1154 ClientSocketPoolBaseHelper::Group::Group() | |
| 1155 : unassigned_job_count_(0), | |
| 1156 pending_requests_(NUM_PRIORITIES), | |
| 1157 active_socket_count_(0) {} | |
| 1158 | |
| 1159 ClientSocketPoolBaseHelper::Group::~Group() { | |
| 1160 DCHECK_EQ(0u, unassigned_job_count_); | |
| 1161 } | |
| 1162 | |
| 1163 void ClientSocketPoolBaseHelper::Group::StartBackupJobTimer( | |
| 1164 const std::string& group_name, | |
| 1165 ClientSocketPoolBaseHelper* pool) { | |
| 1166 // Only allow one timer to run at a time. | |
| 1167 if (BackupJobTimerIsRunning()) | |
| 1168 return; | |
| 1169 | |
| 1170 // Unretained here is okay because |backup_job_timer_| is | |
| 1171 // automatically cancelled when it's destroyed. | |
| 1172 backup_job_timer_.Start( | |
| 1173 FROM_HERE, pool->ConnectRetryInterval(), | |
| 1174 base::Bind(&Group::OnBackupJobTimerFired, base::Unretained(this), | |
| 1175 group_name, pool)); | |
| 1176 } | |
| 1177 | |
| 1178 bool ClientSocketPoolBaseHelper::Group::BackupJobTimerIsRunning() const { | |
| 1179 return backup_job_timer_.IsRunning(); | |
| 1180 } | |
| 1181 | |
| 1182 bool ClientSocketPoolBaseHelper::Group::TryToUseUnassignedConnectJob() { | |
| 1183 SanityCheck(); | |
| 1184 | |
| 1185 if (unassigned_job_count_ == 0) | |
| 1186 return false; | |
| 1187 --unassigned_job_count_; | |
| 1188 return true; | |
| 1189 } | |
| 1190 | |
| 1191 void ClientSocketPoolBaseHelper::Group::AddJob(scoped_ptr<ConnectJob> job, | |
| 1192 bool is_preconnect) { | |
| 1193 SanityCheck(); | |
| 1194 | |
| 1195 if (is_preconnect) | |
| 1196 ++unassigned_job_count_; | |
| 1197 jobs_.insert(job.release()); | |
| 1198 } | |
| 1199 | |
| 1200 void ClientSocketPoolBaseHelper::Group::RemoveJob(ConnectJob* job) { | |
| 1201 scoped_ptr<ConnectJob> owned_job(job); | |
| 1202 SanityCheck(); | |
| 1203 | |
| 1204 std::set<ConnectJob*>::iterator it = jobs_.find(job); | |
| 1205 if (it != jobs_.end()) { | |
| 1206 jobs_.erase(it); | |
| 1207 } else { | |
| 1208 NOTREACHED(); | |
| 1209 } | |
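| // |unassigned_job_count_| can never exceed the total number of jobs, so | |
| // clamp it after removing a job. | |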
| 1210 size_t job_count = jobs_.size(); | |
| 1211 if (job_count < unassigned_job_count_) | |
| 1212 unassigned_job_count_ = job_count; | |
| 1213 | |
| 1214 // If we've got no more jobs for this group, then we no longer need a | |
| 1215 // backup job either. | |
| 1216 if (jobs_.empty()) | |
| 1217 backup_job_timer_.Stop(); | |
| 1218 } | |
| 1219 | |
| 1220 void ClientSocketPoolBaseHelper::Group::OnBackupJobTimerFired( | |
| 1221 std::string group_name, | |
| 1222 ClientSocketPoolBaseHelper* pool) { | |
| 1223 // If there are no more jobs pending, there is no work to do. | |
| 1224 // If we've done our cleanups correctly, this should not happen. | |
| 1225 if (jobs_.empty()) { | |
| 1226 NOTREACHED(); | |
| 1227 return; | |
| 1228 } | |
| 1229 | |
| 1230 // If our old job is waiting on DNS, or if we can't create any sockets | |
| 1231 // right now due to limits, just reset the timer. | |
| 1232 if (pool->ReachedMaxSocketsLimit() || | |
| 1233 !HasAvailableSocketSlot(pool->max_sockets_per_group_) || | |
| 1234 (*jobs_.begin())->GetLoadState() == LOAD_STATE_RESOLVING_HOST) { | |
| 1235 StartBackupJobTimer(group_name, pool); | |
| 1236 return; | |
| 1237 } | |
| 1238 | |
| 1239 if (pending_requests_.empty()) | |
| 1240 return; | |
| 1241 | |
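| // Start a second ("backup") connect job for the highest priority pending | |
| // request; whichever job completes first will serve the pending request. | |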
| 1242 scoped_ptr<ConnectJob> backup_job = | |
| 1243 pool->connect_job_factory_->NewConnectJob( | |
| 1244 group_name, *pending_requests_.FirstMax().value(), pool); | |
| 1245 backup_job->net_log().AddEvent(NetLog::TYPE_BACKUP_CONNECT_JOB_CREATED); | |
| 1246 int rv = backup_job->Connect(); | |
| 1247 pool->connecting_socket_count_++; | |
| 1248 ConnectJob* raw_backup_job = backup_job.get(); | |
| 1249 AddJob(backup_job.Pass(), false); | |
| 1250 if (rv != ERR_IO_PENDING) | |
| 1251 pool->OnConnectJobComplete(rv, raw_backup_job); | |
| 1252 } | |
| 1253 | |
| 1254 void ClientSocketPoolBaseHelper::Group::SanityCheck() { | |
| 1255 DCHECK_LE(unassigned_job_count_, jobs_.size()); | |
| 1256 } | |
| 1257 | |
| 1258 void ClientSocketPoolBaseHelper::Group::RemoveAllJobs() { | |
| 1259 SanityCheck(); | |
| 1260 | |
| 1261 // Delete active jobs. | |
| 1262 STLDeleteElements(&jobs_); | |
| 1263 unassigned_job_count_ = 0; | |
| 1264 | |
| 1265 // Stop backup job timer. | |
| 1266 backup_job_timer_.Stop(); | |
| 1267 } | |
| 1268 | |
| 1269 const ClientSocketPoolBaseHelper::Request* | |
| 1270 ClientSocketPoolBaseHelper::Group::GetNextPendingRequest() const { | |
| 1271 return | |
| 1272 pending_requests_.empty() ? NULL : pending_requests_.FirstMax().value(); | |
| 1273 } | |
| 1274 | |
| 1275 bool ClientSocketPoolBaseHelper::Group::HasConnectJobForHandle( | |
| 1276 const ClientSocketHandle* handle) const { | |
| 1277 // Search the first |jobs_.size()| pending requests for |handle|. | |
| 1278 // If it's farther back in the deque than that, it doesn't have a | |
| 1279 // corresponding ConnectJob. | |
| 1280 size_t i = 0; | |
| 1281 for (RequestQueue::Pointer pointer = pending_requests_.FirstMax(); | |
| 1282 !pointer.is_null() && i < jobs_.size(); | |
| 1283 pointer = pending_requests_.GetNextTowardsLastMin(pointer), ++i) { | |
| 1284 if (pointer.value()->handle() == handle) | |
| 1285 return true; | |
| 1286 } | |
| 1287 return false; | |
| 1288 } | |
| 1289 | |
| 1290 void ClientSocketPoolBaseHelper::Group::InsertPendingRequest( | |
| 1291 scoped_ptr<const Request> request) { | |
| 1292 // This value must be cached before we release |request|. | |
| 1293 RequestPriority priority = request->priority(); | |
| 1294 if (request->ignore_limits()) { | |
| 1295 // Put requests with ignore_limits == true (which should have | |
| 1296 // priority == MAXIMUM_PRIORITY) ahead of other requests with | |
| 1297 // MAXIMUM_PRIORITY. | |
| 1298 DCHECK_EQ(priority, MAXIMUM_PRIORITY); | |
| 1299 pending_requests_.InsertAtFront(request.release(), priority); | |
| 1300 } else { | |
| 1301 pending_requests_.Insert(request.release(), priority); | |
| 1302 } | |
| 1303 } | |
| 1304 | |
| 1305 scoped_ptr<const ClientSocketPoolBaseHelper::Request> | |
| 1306 ClientSocketPoolBaseHelper::Group::PopNextPendingRequest() { | |
| 1307 if (pending_requests_.empty()) | |
| 1308 return scoped_ptr<const ClientSocketPoolBaseHelper::Request>(); | |
| 1309 return RemovePendingRequest(pending_requests_.FirstMax()); | |
| 1310 } | |
| 1311 | |
| 1312 scoped_ptr<const ClientSocketPoolBaseHelper::Request> | |
| 1313 ClientSocketPoolBaseHelper::Group::FindAndRemovePendingRequest( | |
| 1314 ClientSocketHandle* handle) { | |
| 1315 for (RequestQueue::Pointer pointer = pending_requests_.FirstMax(); | |
| 1316 !pointer.is_null(); | |
| 1317 pointer = pending_requests_.GetNextTowardsLastMin(pointer)) { | |
| 1318 if (pointer.value()->handle() == handle) { | |
| 1319 scoped_ptr<const Request> request = RemovePendingRequest(pointer); | |
| 1320 return request.Pass(); | |
| 1321 } | |
| 1322 } | |
| 1323 return scoped_ptr<const ClientSocketPoolBaseHelper::Request>(); | |
| 1324 } | |
| 1325 | |
| 1326 scoped_ptr<const ClientSocketPoolBaseHelper::Request> | |
| 1327 ClientSocketPoolBaseHelper::Group::RemovePendingRequest( | |
| 1328 const RequestQueue::Pointer& pointer) { | |
| 1329 scoped_ptr<const Request> request(pointer.value()); | |
| 1330 pending_requests_.Erase(pointer); | |
| 1331 // If there are no more requests, kill the backup timer. | |
| 1332 if (pending_requests_.empty()) | |
| 1333 backup_job_timer_.Stop(); | |
| 1334 return request.Pass(); | |
| 1335 } | |
| 1336 | |
| 1337 } // namespace internal | |
| 1338 | |
| 1339 } // namespace net | |