OLD | NEW |
(Empty) | |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "net/socket/websocket_transport_client_socket_pool.h" |
| 6 |
| 7 #include <algorithm> |
| 8 |
| 9 #include "base/compiler_specific.h" |
| 10 #include "base/logging.h" |
| 11 #include "base/numerics/safe_conversions.h" |
| 12 #include "base/strings/string_util.h" |
| 13 #include "base/time/time.h" |
| 14 #include "base/values.h" |
| 15 #include "net/base/net_errors.h" |
| 16 #include "net/base/net_log.h" |
| 17 #include "net/socket/client_socket_handle.h" |
| 18 #include "net/socket/client_socket_pool_base.h" |
| 19 #include "net/socket/websocket_endpoint_lock_manager.h" |
| 20 #include "net/socket/websocket_transport_connect_sub_job.h" |
| 21 |
| 22 namespace net { |
| 23 |
| 24 namespace { |
| 25 |
| 26 using base::TimeDelta; |
| 27 |
// TODO(ricea): For now, we implement a global timeout for compatibility with
| 29 // TransportConnectJob. Since WebSocketTransportConnectJob controls the address |
| 30 // selection process more tightly, it could do something smarter here. |
| 31 const int kTransportConnectJobTimeoutInSeconds = 240; // 4 minutes. |
| 32 |
| 33 } // namespace |
| 34 |
// Constructs a connect job for a single WebSocket transport connection.
// |handle| identifies the request this job is early-bound to, and |callback|
// is stored so the pool can invoke it on completion (the job itself reports
// completion through |delegate|). The job's own net log source is created
// from |pool_net_log|, while |request_net_log| tracks the originating
// request.
WebSocketTransportConnectJob::WebSocketTransportConnectJob(
    const std::string& group_name,
    RequestPriority priority,
    const scoped_refptr<TransportSocketParams>& params,
    TimeDelta timeout_duration,
    const CompletionCallback& callback,
    ClientSocketFactory* client_socket_factory,
    HostResolver* host_resolver,
    ClientSocketHandle* handle,
    Delegate* delegate,
    NetLog* pool_net_log,
    const BoundNetLog& request_net_log)
    : ConnectJob(group_name,
                 timeout_duration,
                 priority,
                 delegate,
                 BoundNetLog::Make(pool_net_log, NetLog::SOURCE_CONNECT_JOB)),
      helper_(params, client_socket_factory, host_resolver, &connect_timing_),
      race_result_(TransportConnectJobHelper::CONNECTION_LATENCY_UNKNOWN),
      handle_(handle),
      callback_(callback),
      request_net_log_(request_net_log),
      had_ipv4_(false),
      had_ipv6_(false) {
  // The helper drives the resolve/connect state machine and calls back into
  // this object's Do* methods on I/O completion.
  helper_.SetOnIOComplete(this);
}
| 61 |
// Sub-jobs and the fallback timer are members, so they are torn down by the
// implicit member destructors.
WebSocketTransportConnectJob::~WebSocketTransportConnectJob() {}
| 63 |
| 64 LoadState WebSocketTransportConnectJob::GetLoadState() const { |
| 65 LoadState load_state = LOAD_STATE_RESOLVING_HOST; |
| 66 if (ipv6_job_) |
| 67 load_state = ipv6_job_->GetLoadState(); |
| 68 // This method should return LOAD_STATE_CONNECTING in preference to |
| 69 // LOAD_STATE_WAITING_FOR_AVAILABLE_SOCKET when possible because "waiting for |
| 70 // available socket" implies that nothing is happening. |
| 71 if (ipv4_job_ && load_state != LOAD_STATE_CONNECTING) |
| 72 load_state = ipv4_job_->GetLoadState(); |
| 73 return load_state; |
| 74 } |
| 75 |
| 76 int WebSocketTransportConnectJob::DoResolveHost() { |
| 77 return helper_.DoResolveHost(priority(), net_log()); |
| 78 } |
| 79 |
| 80 int WebSocketTransportConnectJob::DoResolveHostComplete(int result) { |
| 81 return helper_.DoResolveHostComplete(result, net_log()); |
| 82 } |
| 83 |
// Splits the resolved addresses by family and starts the connection race:
// IPv6 is attempted first; if it does not complete synchronously, a fallback
// timer starts IPv4 in parallel later (see StartIPv4JobAsync()).
int WebSocketTransportConnectJob::DoTransportConnect() {
  AddressList ipv4_addresses;
  AddressList ipv6_addresses;
  int result = ERR_UNEXPECTED;
  helper_.set_next_state(
      TransportConnectJobHelper::STATE_TRANSPORT_CONNECT_COMPLETE);

  // Partition the resolved addresses by family, preserving resolver order.
  for (AddressList::const_iterator it = helper_.addresses().begin();
       it != helper_.addresses().end();
       ++it) {
    switch (it->GetFamily()) {
      case ADDRESS_FAMILY_IPV4:
        ipv4_addresses.push_back(*it);
        break;

      case ADDRESS_FAMILY_IPV6:
        ipv6_addresses.push_back(*it);
        break;

      default:
        DVLOG(1) << "Unexpected ADDRESS_FAMILY: " << it->GetFamily();
        break;
    }
  }

  if (!ipv4_addresses.empty()) {
    had_ipv4_ = true;
    ipv4_job_.reset(new WebSocketTransportConnectSubJob(
        ipv4_addresses, this, SUB_JOB_IPV4));
  }

  if (!ipv6_addresses.empty()) {
    had_ipv6_ = true;
    ipv6_job_.reset(new WebSocketTransportConnectSubJob(
        ipv6_addresses, this, SUB_JOB_IPV6));
    result = ipv6_job_->Start();
    switch (result) {
      case OK:
        // IPv6 connected synchronously; record which race outcome this was
        // for the latency histogram.
        SetSocket(ipv6_job_->PassSocket());
        race_result_ =
            had_ipv4_
                ? TransportConnectJobHelper::CONNECTION_LATENCY_IPV6_RACEABLE
                : TransportConnectJobHelper::CONNECTION_LATENCY_IPV6_SOLO;
        return result;

      case ERR_IO_PENDING:
        if (ipv4_job_) {
          // IPv6 gets a head start; IPv4 is started only after the fallback
          // delay expires.
          // This use of base::Unretained is safe because |fallback_timer_| is
          // owned by this object.
          fallback_timer_.Start(
              FROM_HERE,
              TimeDelta::FromMilliseconds(
                  TransportConnectJobHelper::kIPv6FallbackTimerInMs),
              base::Bind(&WebSocketTransportConnectJob::StartIPv4JobAsync,
                         base::Unretained(this)));
        }
        return result;

      default:
        // IPv6 failed synchronously; discard it and try IPv4 immediately.
        ipv6_job_.reset();
    }
  }

  DCHECK(!ipv6_job_);
  if (ipv4_job_) {
    result = ipv4_job_->Start();
    if (result == OK) {
      SetSocket(ipv4_job_->PassSocket());
      race_result_ =
          had_ipv6_
              ? TransportConnectJobHelper::CONNECTION_LATENCY_IPV4_WINS_RACE
              : TransportConnectJobHelper::CONNECTION_LATENCY_IPV4_NO_RACE;
    }
  }

  return result;
}
| 161 |
| 162 int WebSocketTransportConnectJob::DoTransportConnectComplete(int result) { |
| 163 if (result == OK) |
| 164 helper_.HistogramDuration(race_result_); |
| 165 return result; |
| 166 } |
| 167 |
// Called by a sub-job when it finishes. On success, records the race result,
// takes ownership of the socket and cancels the other sub-job. On failure,
// destroys the failed sub-job; an IPv6 failure immediately promotes a
// not-yet-started IPv4 sub-job. Completion is reported to the state machine
// only once no sub-jobs remain.
void WebSocketTransportConnectJob::OnSubJobComplete(
    int result,
    WebSocketTransportConnectSubJob* job) {
  if (result == OK) {
    switch (job->type()) {
      case SUB_JOB_IPV4:
        race_result_ =
            had_ipv6_
                ? TransportConnectJobHelper::CONNECTION_LATENCY_IPV4_WINS_RACE
                : TransportConnectJobHelper::CONNECTION_LATENCY_IPV4_NO_RACE;
        break;

      case SUB_JOB_IPV6:
        race_result_ =
            had_ipv4_
                ? TransportConnectJobHelper::CONNECTION_LATENCY_IPV6_RACEABLE
                : TransportConnectJobHelper::CONNECTION_LATENCY_IPV6_SOLO;
        break;
    }
    SetSocket(job->PassSocket());

    // Make sure all connections are cancelled even if this object fails to be
    // deleted.
    ipv4_job_.reset();
    ipv6_job_.reset();
  } else {
    switch (job->type()) {
      case SUB_JOB_IPV4:
        ipv4_job_.reset();
        break;

      case SUB_JOB_IPV6:
        ipv6_job_.reset();
        // IPv6 failed before the fallback timer fired; start IPv4 right away
        // rather than waiting for the timer.
        if (ipv4_job_ && !ipv4_job_->started()) {
          fallback_timer_.Stop();
          result = ipv4_job_->Start();
          if (result != ERR_IO_PENDING) {
            // Note: recursion depth is bounded, since the IPv4 sub-job is
            // already destroyed on the next failure path.
            OnSubJobComplete(result, ipv4_job_.get());
            return;
          }
        }
        break;
    }
    // The other sub-job is still running; wait for it before reporting
    // failure to the state machine.
    if (ipv4_job_ || ipv6_job_)
      return;
  }
  helper_.OnIOComplete(this, result);
}
| 216 |
| 217 void WebSocketTransportConnectJob::StartIPv4JobAsync() { |
| 218 DCHECK(ipv4_job_); |
| 219 int result = ipv4_job_->Start(); |
| 220 if (result != ERR_IO_PENDING) |
| 221 OnSubJobComplete(result, ipv4_job_.get()); |
| 222 } |
| 223 |
// ConnectJob implementation: hands control to the helper's state machine,
// which drives the Do* methods above.
int WebSocketTransportConnectJob::ConnectInternal() {
  return helper_.DoConnectInternal(this);
}
| 227 |
// The construction parameters are forwarded to the TransportClientSocketPool
// base class, but several are also stored here because this pool creates and
// tracks its own connect jobs (see RequestSocket()) instead of delegating to
// the base implementation.
WebSocketTransportClientSocketPool::WebSocketTransportClientSocketPool(
    int max_sockets,
    int max_sockets_per_group,
    ClientSocketPoolHistograms* histograms,
    HostResolver* host_resolver,
    ClientSocketFactory* client_socket_factory,
    NetLog* net_log)
    : TransportClientSocketPool(max_sockets,
                                max_sockets_per_group,
                                histograms,
                                host_resolver,
                                client_socket_factory,
                                net_log),
      connect_job_delegate_(this),
      histograms_(histograms),
      pool_net_log_(net_log),
      client_socket_factory_(client_socket_factory),
      host_resolver_(host_resolver),
      max_sockets_(max_sockets),
      handed_out_socket_count_(0),
      flushing_(false),
      weak_factory_(this) {}
| 250 |
// Aborts everything still in flight so the invariants DCHECKed below hold at
// destruction. NOTE(review): callbacks queued by FlushWithError() are posted
// through |weak_factory_|, so presumably they are dropped once this object is
// gone — confirm that is the intended behavior.
WebSocketTransportClientSocketPool::~WebSocketTransportClientSocketPool() {
  // Clean up any pending connect jobs.
  FlushWithError(ERR_ABORTED);
  DCHECK(pending_connects_.empty());
  DCHECK_EQ(0, handed_out_socket_count_);
  DCHECK(stalled_request_queue_.empty());
  DCHECK(stalled_request_map_.empty());
}
| 259 |
| 260 // static |
| 261 void WebSocketTransportClientSocketPool::UnlockEndpoint( |
| 262 ClientSocketHandle* handle) { |
| 263 DCHECK(handle->is_initialized()); |
| 264 WebSocketEndpointLockManager::GetInstance()->UnlockSocket(handle->socket()); |
| 265 } |
| 266 |
// Requests a single connected socket. Unlike the base-class pool, this pool
// uses early-binding: a connect job is created per request and tied to
// |handle| immediately. If the socket limit has been reached the request is
// stalled (queued) unless |params| says to ignore limits. |params| must
// point to a scoped_refptr<TransportSocketParams>.
int WebSocketTransportClientSocketPool::RequestSocket(
    const std::string& group_name,
    const void* params,
    RequestPriority priority,
    ClientSocketHandle* handle,
    const CompletionCallback& callback,
    const BoundNetLog& request_net_log) {
  DCHECK(params);
  const scoped_refptr<TransportSocketParams>& casted_params =
      *static_cast<const scoped_refptr<TransportSocketParams>*>(params);

  NetLogTcpClientSocketPoolRequestedSocket(request_net_log, &casted_params);

  CHECK(!callback.is_null());
  CHECK(handle);

  request_net_log.BeginEvent(NetLog::TYPE_SOCKET_POOL);

  // No capacity: queue the request. ActivateStalledRequest() re-submits it
  // here when capacity becomes available.
  if (ReachedMaxSocketsLimit() && !casted_params->ignore_limits()) {
    request_net_log.AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS);
    // TODO(ricea): Use emplace_back when C++11 becomes allowed.
    StalledRequest request(
        casted_params, priority, handle, callback, request_net_log);
    stalled_request_queue_.push_back(request);
    StalledRequestQueue::iterator iterator = stalled_request_queue_.end();
    --iterator;
    DCHECK_EQ(handle, iterator->handle);
    // Because StalledRequestQueue is a std::list, its iterators are guaranteed
    // to remain valid as long as the elements are not removed. As long as
    // stalled_request_queue_ and stalled_request_map_ are updated in sync, it
    // is safe to dereference an iterator in stalled_request_map_ to find the
    // corresponding list element.
    stalled_request_map_.insert(
        StalledRequestMap::value_type(handle, iterator));
    return ERR_IO_PENDING;
  }

  scoped_ptr<WebSocketTransportConnectJob> connect_job(
      new WebSocketTransportConnectJob(group_name,
                                       priority,
                                       casted_params,
                                       ConnectionTimeout(),
                                       callback,
                                       client_socket_factory_,
                                       host_resolver_,
                                       handle,
                                       &connect_job_delegate_,
                                       pool_net_log_,
                                       request_net_log));

  int rv = connect_job->Connect();
  // Regardless of the outcome of |connect_job|, it will always be bound to
  // |handle|, since this pool uses early-binding. So the binding is logged
  // here, without waiting for the result.
  request_net_log.AddEvent(
      NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB,
      connect_job->net_log().source().ToEventParametersCallback());
  if (rv == OK) {
    HandOutSocket(connect_job->PassSocket(),
                  connect_job->connect_timing(),
                  handle,
                  request_net_log);
    request_net_log.EndEvent(NetLog::TYPE_SOCKET_POOL);
  } else if (rv == ERR_IO_PENDING) {
    // TODO(ricea): Implement backup job timer?
    AddJob(handle, connect_job.Pass());
  } else {
    // Synchronous failure: the job may still carry a socket with error
    // details; hand it out so the caller can retrieve them.
    scoped_ptr<StreamSocket> error_socket;
    connect_job->GetAdditionalErrorState(handle);
    error_socket = connect_job->PassSocket();
    if (error_socket) {
      HandOutSocket(error_socket.Pass(),
                    connect_job->connect_timing(),
                    handle,
                    request_net_log);
    }
  }

  if (rv != ERR_IO_PENDING) {
    request_net_log.EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
  }

  return rv;
}
| 351 |
// Preconnecting sockets is deliberately unsupported by this pool.
void WebSocketTransportClientSocketPool::RequestSockets(
    const std::string& group_name,
    const void* params,
    int num_sockets,
    const BoundNetLog& net_log) {
  NOTIMPLEMENTED();
}
| 359 |
| 360 void WebSocketTransportClientSocketPool::CancelRequest( |
| 361 const std::string& group_name, |
| 362 ClientSocketHandle* handle) { |
| 363 if (DeleteStalledRequest(handle)) |
| 364 return; |
| 365 if (!DeleteJob(handle)) |
| 366 pending_callbacks_.erase(handle); |
| 367 if (!ReachedMaxSocketsLimit() && !stalled_request_queue_.empty()) |
| 368 ActivateStalledRequest(); |
| 369 } |
| 370 |
| 371 void WebSocketTransportClientSocketPool::ReleaseSocket( |
| 372 const std::string& group_name, |
| 373 scoped_ptr<StreamSocket> socket, |
| 374 int id) { |
| 375 WebSocketEndpointLockManager::GetInstance()->UnlockSocket(socket.get()); |
| 376 CHECK_GT(handed_out_socket_count_, 0); |
| 377 --handed_out_socket_count_; |
| 378 if (!ReachedMaxSocketsLimit() && !stalled_request_queue_.empty()) |
| 379 ActivateStalledRequest(); |
| 380 } |
| 381 |
// Fails every pending connect job and stalled request with |error| and
// resets the handed-out count. Used by the destructor (with ERR_ABORTED).
void WebSocketTransportClientSocketPool::FlushWithError(int error) {
  // Sockets which are in LOAD_STATE_CONNECTING are in danger of unlocking
  // sockets waiting for the endpoint lock. If they connected synchronously,
  // then OnConnectJobComplete() may be called re-entrantly while the jobs
  // below are being deleted. The |flushing_| flag tells this object to ignore
  // spurious calls to OnConnectJobComplete(). It is safe to ignore those
  // calls because this method will delete the jobs and call their callbacks
  // anyway.
  flushing_ = true;
  for (PendingConnectsMap::iterator it = pending_connects_.begin();
       it != pending_connects_.end();
       ++it) {
    InvokeUserCallbackLater(
        it->second->handle(), it->second->callback(), error);
    delete it->second, it->second = NULL;
  }
  pending_connects_.clear();
  for (StalledRequestQueue::iterator it = stalled_request_queue_.begin();
       it != stalled_request_queue_.end();
       ++it) {
    InvokeUserCallbackLater(it->handle, it->callback, error);
  }
  stalled_request_map_.clear();
  stalled_request_queue_.clear();
  handed_out_socket_count_ = 0;
  flushing_ = false;
}
| 408 |
// No-op: sockets are handed out immediately and never pooled idle.
void WebSocketTransportClientSocketPool::CloseIdleSockets() {
  // We have no idle sockets.
}
| 412 |
// This pool never holds idle sockets.
int WebSocketTransportClientSocketPool::IdleSocketCount() const {
  return 0;
}
| 416 |
// This pool never holds idle sockets, in any group.
int WebSocketTransportClientSocketPool::IdleSocketCountInGroup(
    const std::string& group_name) const {
  return 0;
}
| 421 |
| 422 LoadState WebSocketTransportClientSocketPool::GetLoadState( |
| 423 const std::string& group_name, |
| 424 const ClientSocketHandle* handle) const { |
| 425 if (stalled_request_map_.find(handle) != stalled_request_map_.end()) |
| 426 return LOAD_STATE_WAITING_FOR_AVAILABLE_SOCKET; |
| 427 if (pending_callbacks_.count(handle)) |
| 428 return LOAD_STATE_CONNECTING; |
| 429 return LookupConnectJob(handle)->GetLoadState(); |
| 430 } |
| 431 |
// Returns a dictionary describing the pool's state for diagnostics. The
// caller takes ownership of the returned value. |include_nested_pools| is
// ignored; this pool reports no nested pools.
base::DictionaryValue* WebSocketTransportClientSocketPool::GetInfoAsValue(
    const std::string& name,
    const std::string& type,
    bool include_nested_pools) const {
  base::DictionaryValue* dict = new base::DictionaryValue();
  dict->SetString("name", name);
  dict->SetString("type", type);
  dict->SetInteger("handed_out_socket_count", handed_out_socket_count_);
  dict->SetInteger("connecting_socket_count", pending_connects_.size());
  // This pool keeps no idle sockets and no generation counter.
  dict->SetInteger("idle_socket_count", 0);
  dict->SetInteger("max_socket_count", max_sockets_);
  // NOTE(review): the per-group limit is reported as |max_sockets_| — the
  // constructor's |max_sockets_per_group| is not stored. Presumably
  // intentional; confirm.
  dict->SetInteger("max_sockets_per_group", max_sockets_);
  dict->SetInteger("pool_generation_number", 0);
  return dict;
}
| 447 |
// Overall timeout applied to each connect job; see the comment on
// kTransportConnectJobTimeoutInSeconds.
TimeDelta WebSocketTransportClientSocketPool::ConnectionTimeout() const {
  return TimeDelta::FromSeconds(kTransportConnectJobTimeoutInSeconds);
}
| 451 |
// Returns the histograms object supplied at construction (also shared with
// the base class).
ClientSocketPoolHistograms* WebSocketTransportClientSocketPool::histograms()
    const {
  return histograms_;
}
| 456 |
// The pool is stalled when at least one request is queued waiting for
// capacity.
bool WebSocketTransportClientSocketPool::IsStalled() const {
  return !stalled_request_queue_.empty();
}
| 460 |
// Handles completion of a connect job (via ConnectJobDelegate): hands out
// the socket (or error state), deletes the job, possibly activates a stalled
// request, and queues the user callback.
void WebSocketTransportClientSocketPool::OnConnectJobComplete(
    int result,
    WebSocketTransportConnectJob* job) {
  DCHECK_NE(ERR_IO_PENDING, result);

  scoped_ptr<StreamSocket> socket = job->PassSocket();

  // See comment in FlushWithError.
  if (flushing_) {
    WebSocketEndpointLockManager::GetInstance()->UnlockSocket(socket.get());
    return;
  }

  // Copy everything needed out of |job| before DeleteJob() destroys it below.
  BoundNetLog request_net_log = job->request_net_log();
  CompletionCallback callback = job->callback();
  LoadTimingInfo::ConnectTiming connect_timing = job->connect_timing();

  ClientSocketHandle* const handle = job->handle();
  bool handed_out_socket = false;

  if (result == OK) {
    DCHECK(socket.get());
    handed_out_socket = true;
    HandOutSocket(socket.Pass(), connect_timing, handle, request_net_log);
    request_net_log.EndEvent(NetLog::TYPE_SOCKET_POOL);
  } else {
    // If we got a socket, it must contain error information so pass that
    // up so that the caller can retrieve it.
    job->GetAdditionalErrorState(handle);
    if (socket.get()) {
      handed_out_socket = true;
      HandOutSocket(socket.Pass(), connect_timing, handle, request_net_log);
    }
    request_net_log.EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, result);
  }
  bool delete_succeeded = DeleteJob(handle);
  DCHECK(delete_succeeded);
  // A failed job freed a connect slot; see if a stalled request can use it.
  if (!handed_out_socket && !stalled_request_queue_.empty() &&
      !ReachedMaxSocketsLimit())
    ActivateStalledRequest();
  InvokeUserCallbackLater(handle, callback, result);
}
| 503 |
| 504 void WebSocketTransportClientSocketPool::InvokeUserCallbackLater( |
| 505 ClientSocketHandle* handle, |
| 506 const CompletionCallback& callback, |
| 507 int rv) { |
| 508 DCHECK(!pending_callbacks_.count(handle)); |
| 509 pending_callbacks_.insert(handle); |
| 510 base::MessageLoop::current()->PostTask( |
| 511 FROM_HERE, |
| 512 base::Bind(&WebSocketTransportClientSocketPool::InvokeUserCallback, |
| 513 weak_factory_.GetWeakPtr(), |
| 514 handle, |
| 515 callback, |
| 516 rv)); |
| 517 } |
| 518 |
| 519 void WebSocketTransportClientSocketPool::InvokeUserCallback( |
| 520 ClientSocketHandle* handle, |
| 521 const CompletionCallback& callback, |
| 522 int rv) { |
| 523 if (pending_callbacks_.erase(handle)) |
| 524 callback.Run(rv); |
| 525 } |
| 526 |
| 527 bool WebSocketTransportClientSocketPool::ReachedMaxSocketsLimit() const { |
| 528 return handed_out_socket_count_ >= max_sockets_ || |
| 529 base::checked_cast<int>(pending_connects_.size()) >= |
| 530 max_sockets_ - handed_out_socket_count_; |
| 531 } |
| 532 |
// Transfers |socket| to |handle|, records the connect timing, logs the
// binding, and counts the socket as handed out. Sockets from this pool are
// always fresh — never reused — hence the UNUSED/idle-time DCHECKs.
void WebSocketTransportClientSocketPool::HandOutSocket(
    scoped_ptr<StreamSocket> socket,
    const LoadTimingInfo::ConnectTiming& connect_timing,
    ClientSocketHandle* handle,
    const BoundNetLog& net_log) {
  DCHECK(socket);
  handle->SetSocket(socket.Pass());
  DCHECK_EQ(ClientSocketHandle::UNUSED, handle->reuse_type());
  DCHECK_EQ(0, handle->idle_time().InMicroseconds());
  // NOTE(review): 0 appears to be a placeholder pool id (this pool also
  // reports generation number 0 in GetInfoAsValue()) — confirm.
  handle->set_pool_id(0);
  handle->set_connect_timing(connect_timing);

  net_log.AddEvent(
      NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET,
      handle->socket()->NetLog().source().ToEventParametersCallback());

  ++handed_out_socket_count_;
}
| 551 |
| 552 void WebSocketTransportClientSocketPool::AddJob( |
| 553 ClientSocketHandle* handle, |
| 554 scoped_ptr<WebSocketTransportConnectJob> connect_job) { |
| 555 bool inserted = |
| 556 pending_connects_.insert(PendingConnectsMap::value_type( |
| 557 handle, connect_job.release())).second; |
| 558 DCHECK(inserted); |
| 559 } |
| 560 |
// Destroys the pending connect job bound to |handle|, if any. Returns true
// if a job was deleted.
bool WebSocketTransportClientSocketPool::DeleteJob(ClientSocketHandle* handle) {
  PendingConnectsMap::iterator it = pending_connects_.find(handle);
  if (it == pending_connects_.end())
    return false;
  // Deleting a ConnectJob which holds an endpoint lock can lead to a different
  // ConnectJob proceeding to connect. If the connect proceeds synchronously
  // (usually because of a failure) then it can trigger that job to be
  // deleted. |it| remains valid because std::map guarantees that erase() does
  // not invalidate iterators to other entries.
  delete it->second, it->second = NULL;
  DCHECK(pending_connects_.find(handle) == it);
  pending_connects_.erase(it);
  return true;
}
| 575 |
| 576 const WebSocketTransportConnectJob* |
| 577 WebSocketTransportClientSocketPool::LookupConnectJob( |
| 578 const ClientSocketHandle* handle) const { |
| 579 PendingConnectsMap::const_iterator it = pending_connects_.find(handle); |
| 580 CHECK(it != pending_connects_.end()); |
| 581 return it->second; |
| 582 } |
| 583 |
// Starts as many stalled requests as the socket limit allows, removing each
// from the queue/map before re-submitting it to RequestSocket().
void WebSocketTransportClientSocketPool::ActivateStalledRequest() {
  DCHECK(!stalled_request_queue_.empty());
  DCHECK(!ReachedMaxSocketsLimit());
  // Usually we will only be able to activate one stalled request at a time,
  // however if all the connects fail synchronously for some reason, we may be
  // able to clear the whole queue at once.
  while (!stalled_request_queue_.empty() && !ReachedMaxSocketsLimit()) {
    StalledRequest request(stalled_request_queue_.front());
    stalled_request_queue_.pop_front();
    stalled_request_map_.erase(request.handle);
    // "ignored" is a placeholder: RequestSocket() only forwards the group
    // name to the connect job and does not key anything off it.
    int rv = RequestSocket("ignored",
                           &request.params,
                           request.priority,
                           request.handle,
                           request.callback,
                           request.net_log);
    // Synchronous results are delivered via InvokeUserCallbackLater(), so the
    // user callback can never re-enter this method synchronously.
    if (rv != ERR_IO_PENDING)
      InvokeUserCallbackLater(request.handle, request.callback, rv);
  }
}
| 606 |
| 607 bool WebSocketTransportClientSocketPool::DeleteStalledRequest( |
| 608 ClientSocketHandle* handle) { |
| 609 StalledRequestMap::iterator it = stalled_request_map_.find(handle); |
| 610 if (it == stalled_request_map_.end()) |
| 611 return false; |
| 612 stalled_request_queue_.erase(it->second); |
| 613 stalled_request_map_.erase(it); |
| 614 return true; |
| 615 } |
| 616 |
// |owner| is not owned and must outlive the delegate (the delegate is a
// member of the pool, so this holds by construction).
WebSocketTransportClientSocketPool::ConnectJobDelegate::ConnectJobDelegate(
    WebSocketTransportClientSocketPool* owner)
    : owner_(owner) {}
| 620 |
// Nothing to clean up; |owner_| is not owned.
WebSocketTransportClientSocketPool::ConnectJobDelegate::~ConnectJobDelegate() {}
| 622 |
| 623 void |
| 624 WebSocketTransportClientSocketPool::ConnectJobDelegate::OnConnectJobComplete( |
| 625 int result, |
| 626 ConnectJob* job) { |
| 627 owner_->OnConnectJobComplete(result, |
| 628 static_cast<WebSocketTransportConnectJob*>(job)); |
| 629 } |
| 630 |
// Captures everything needed to replay a RequestSocket() call once pool
// capacity becomes available (see ActivateStalledRequest()).
WebSocketTransportClientSocketPool::StalledRequest::StalledRequest(
    const scoped_refptr<TransportSocketParams>& params,
    RequestPriority priority,
    ClientSocketHandle* handle,
    const CompletionCallback& callback,
    const BoundNetLog& net_log)
    : params(params),
      priority(priority),
      handle(handle),
      callback(callback),
      net_log(net_log) {}
| 642 |
// All members are values or non-owning pointers; nothing to release.
WebSocketTransportClientSocketPool::StalledRequest::~StalledRequest() {}
| 644 |
| 645 } // namespace net |
OLD | NEW |