| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/http/http_stream_factory_impl_request.h" | 5 #include "net/http/http_stream_factory_impl_request.h" |
| 6 | 6 |
| 7 #include "base/callback.h" | 7 #include "base/callback.h" |
| 8 #include "base/logging.h" | 8 #include "base/logging.h" |
| 9 #include "base/stl_util.h" | 9 #include "base/stl_util.h" |
| 10 #include "net/http/http_stream_factory_impl_job.h" | 10 #include "net/http/http_stream_factory_impl_job.h" |
| (...skipping 30 matching lines...) |
| 41 DCHECK(jobs_.empty()); | 41 DCHECK(jobs_.empty()); |
| 42 else | 42 else |
| 43 DCHECK(!jobs_.empty()); | 43 DCHECK(!jobs_.empty()); |
| 44 | 44 |
| 45 net_log_.EndEvent(NetLog::TYPE_HTTP_STREAM_REQUEST); | 45 net_log_.EndEvent(NetLog::TYPE_HTTP_STREAM_REQUEST); |
| 46 | 46 |
| 47 for (std::set<Job*>::iterator it = jobs_.begin(); it != jobs_.end(); ++it) | 47 for (std::set<Job*>::iterator it = jobs_.begin(); it != jobs_.end(); ++it) |
| 48 factory_->request_map_.erase(*it); | 48 factory_->request_map_.erase(*it); |
| 49 | 49 |
| 50 RemoveRequestFromSpdySessionRequestMap(); | 50 RemoveRequestFromSpdySessionRequestMap(); |
| 51 RemoveRequestFromHttpPipeliningRequestMap(); | |
| 52 | 51 |
| 53 STLDeleteElements(&jobs_); | 52 STLDeleteElements(&jobs_); |
| 54 } | 53 } |
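The destructor above detaches each surviving Job from the factory's request_map_ and then frees the jobs with STLDeleteElements. For readers outside Chromium, a minimal sketch of what that base/stl_util.h helper does (an approximation for illustration, not the real implementation):

```cpp
// Sketch of base/stl_util.h's STLDeleteElements: delete every pointed-to
// element, then clear the container so no dangling pointers remain.
template <typename Container>
void STLDeleteElementsSketch(Container* container) {
  if (!container)
    return;  // The real helper is likewise a no-op on NULL.
  for (typename Container::iterator it = container->begin();
       it != container->end(); ++it) {
    delete *it;
  }
  container->clear();
}
```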
| 55 | 54 |
| 56 void HttpStreamFactoryImpl::Request::SetSpdySessionKey( | 55 void HttpStreamFactoryImpl::Request::SetSpdySessionKey( |
| 57 const SpdySessionKey& spdy_session_key) { | 56 const SpdySessionKey& spdy_session_key) { |
| 58 DCHECK(!spdy_session_key_.get()); | 57 DCHECK(!spdy_session_key_.get()); |
| 59 spdy_session_key_.reset(new SpdySessionKey(spdy_session_key)); | 58 spdy_session_key_.reset(new SpdySessionKey(spdy_session_key)); |
| 60 RequestSet& request_set = | 59 RequestSet& request_set = |
| 61 factory_->spdy_session_request_map_[spdy_session_key]; | 60 factory_->spdy_session_request_map_[spdy_session_key]; |
| 62 DCHECK(!ContainsKey(request_set, this)); | 61 DCHECK(!ContainsKey(request_set, this)); |
| 63 request_set.insert(this); | 62 request_set.insert(this); |
| 64 } | 63 } |
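SetSpdySessionKey registers this Request under its SpdySessionKey in the factory-wide spdy_session_request_map_, so that every Request waiting on the same prospective SpdySession can be fulfilled together once that session materializes. Roughly, the container shapes involved (assumed from the surrounding code; the real typedefs live in http_stream_factory_impl.h and are not shown in this diff):

```cpp
#include <map>
#include <set>

class Request;         // stand-in for HttpStreamFactoryImpl::Request
class SpdySessionKey;  // stand-in; roughly host:port + proxy + privacy mode

// Each prospective session key fans out to the set of Requests waiting on it.
typedef std::set<Request*> RequestSet;
typedef std::map<SpdySessionKey, RequestSet> SpdySessionRequestMap;
```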
| 65 | 64 |
| 66 bool HttpStreamFactoryImpl::Request::SetHttpPipeliningKey( | |
| 67 const HttpPipelinedHost::Key& http_pipelining_key) { | |
| 68 CHECK(!http_pipelining_key_.get()); | |
| 69 http_pipelining_key_.reset(new HttpPipelinedHost::Key(http_pipelining_key)); | |
| 70 bool was_new_key = !ContainsKey(factory_->http_pipelining_request_map_, | |
| 71 http_pipelining_key); | |
| 72 RequestVector& request_vector = | |
| 73 factory_->http_pipelining_request_map_[http_pipelining_key]; | |
| 74 request_vector.push_back(this); | |
| 75 return was_new_key; | |
| 76 } | |
| 77 | |
| 78 void HttpStreamFactoryImpl::Request::AttachJob(Job* job) { | 65 void HttpStreamFactoryImpl::Request::AttachJob(Job* job) { |
| 79 DCHECK(job); | 66 DCHECK(job); |
| 80 jobs_.insert(job); | 67 jobs_.insert(job); |
| 81 factory_->request_map_[job] = this; | 68 factory_->request_map_[job] = this; |
| 82 } | 69 } |
| 83 | 70 |
| 84 void HttpStreamFactoryImpl::Request::Complete( | 71 void HttpStreamFactoryImpl::Request::Complete( |
| 85 bool was_npn_negotiated, | 72 bool was_npn_negotiated, |
| 86 NextProto protocol_negotiated, | 73 NextProto protocol_negotiated, |
| 87 bool using_spdy, | 74 bool using_spdy, |
| (...skipping 36 matching lines...) |
| 124 OnJobSucceeded(job); | 111 OnJobSucceeded(job); |
| 125 delegate_->OnWebSocketHandshakeStreamReady( | 112 delegate_->OnWebSocketHandshakeStreamReady( |
| 126 used_ssl_config, used_proxy_info, stream); | 113 used_ssl_config, used_proxy_info, stream); |
| 127 } | 114 } |
| 128 | 115 |
| 129 void HttpStreamFactoryImpl::Request::OnStreamFailed( | 116 void HttpStreamFactoryImpl::Request::OnStreamFailed( |
| 130 Job* job, | 117 Job* job, |
| 131 int status, | 118 int status, |
| 132 const SSLConfig& used_ssl_config) { | 119 const SSLConfig& used_ssl_config) { |
| 133 DCHECK_NE(OK, status); | 120 DCHECK_NE(OK, status); |
| 134 // |job| should only be NULL if we're being canceled by a late bound | 121 DCHECK(job); |
| 135 // HttpPipelinedConnection (one that was not created by a job in our |jobs_| | 122 if (!bound_job_.get()) { |
| 136 // set). | |
| 137 if (!job) { | |
| 138 DCHECK(!bound_job_.get()); | |
| 139 DCHECK(!jobs_.empty()); | |
| 140 // NOTE(willchan): We do *NOT* call OrphanJobs() here. The reason is because | |
| 141 // we *WANT* to cancel the unnecessary Jobs from other requests if another | |
| 142 // Job completes first. | |
| 143 } else if (!bound_job_.get()) { | |
| 144 // Hey, we've got other jobs! Maybe one of them will succeed, let's just | 123 // Hey, we've got other jobs! Maybe one of them will succeed, let's just |
| 145 // ignore this failure. | 124 // ignore this failure. |
| 146 if (jobs_.size() > 1) { | 125 if (jobs_.size() > 1) { |
| 147 jobs_.erase(job); | 126 jobs_.erase(job); |
| 148 factory_->request_map_.erase(job); | 127 factory_->request_map_.erase(job); |
| 149 delete job; | 128 delete job; |
| 150 return; | 129 return; |
| 151 } else { | 130 } else { |
| 152 bound_job_.reset(job); | 131 bound_job_.reset(job); |
| 153 jobs_.erase(job); | 132 jobs_.erase(job); |
| (...skipping 107 matching lines...) |
| 261 RequestSet& request_set = | 240 RequestSet& request_set = |
| 262 spdy_session_request_map[*spdy_session_key_]; | 241 spdy_session_request_map[*spdy_session_key_]; |
| 263 DCHECK(ContainsKey(request_set, this)); | 242 DCHECK(ContainsKey(request_set, this)); |
| 264 request_set.erase(this); | 243 request_set.erase(this); |
| 265 if (request_set.empty()) | 244 if (request_set.empty()) |
| 266 spdy_session_request_map.erase(*spdy_session_key_); | 245 spdy_session_request_map.erase(*spdy_session_key_); |
| 267 spdy_session_key_.reset(); | 246 spdy_session_key_.reset(); |
| 268 } | 247 } |
| 269 } | 248 } |
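RemoveRequestFromSpdySessionRequestMap undoes that registration with the usual remove-then-prune idiom: erase this Request from its key's set, then drop the key once the set is empty so the map never accumulates empty buckets. The idiom in isolation (a generic sketch, not Chromium code):

```cpp
#include <map>
#include <set>
#include <string>

// Remove |member| from the set stored under |key|, pruning the bucket when it
// empties so lookups and memory stay proportional to live entries.
void RemoveFromKeyedSet(std::map<std::string, std::set<int> >* m,
                        const std::string& key,
                        int member) {
  std::map<std::string, std::set<int> >::iterator it = m->find(key);
  if (it == m->end())
    return;
  it->second.erase(member);
  if (it->second.empty())
    m->erase(it);
}
```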
| 270 | 249 |
| 271 void | |
| 272 HttpStreamFactoryImpl::Request::RemoveRequestFromHttpPipeliningRequestMap() { | |
| 273 if (http_pipelining_key_.get()) { | |
| 274 HttpPipeliningRequestMap& http_pipelining_request_map = | |
| 275 factory_->http_pipelining_request_map_; | |
| 276 DCHECK(ContainsKey(http_pipelining_request_map, *http_pipelining_key_)); | |
| 277 RequestVector& request_vector = | |
| 278 http_pipelining_request_map[*http_pipelining_key_]; | |
| 279 for (RequestVector::iterator it = request_vector.begin(); | |
| 280 it != request_vector.end(); ++it) { | |
| 281 if (*it == this) { | |
| 282 request_vector.erase(it); | |
| 283 break; | |
| 284 } | |
| 285 } | |
| 286 if (request_vector.empty()) | |
| 287 http_pipelining_request_map.erase(*http_pipelining_key_); | |
| 288 http_pipelining_key_.reset(); | |
| 289 } | |
| 290 } | |
| 291 | |
| 292 void HttpStreamFactoryImpl::Request::OnNewSpdySessionReady( | 250 void HttpStreamFactoryImpl::Request::OnNewSpdySessionReady( |
| 293 Job* job, | 251 Job* job, |
| 294 scoped_ptr<HttpStream> stream, | 252 scoped_ptr<HttpStream> stream, |
| 295 const base::WeakPtr<SpdySession>& spdy_session, | 253 const base::WeakPtr<SpdySession>& spdy_session, |
| 296 bool direct) { | 254 bool direct) { |
| 297 DCHECK(job); | 255 DCHECK(job); |
| 298 DCHECK(job->using_spdy()); | 256 DCHECK(job->using_spdy()); |
| 299 | 257 |
| 300 // Note: |spdy_session| may be NULL. In that case, |delegate_| should still | 258 // Note: |spdy_session| may be NULL. In that case, |delegate_| should still |
| 301 // receive |stream| so the error propagates up correctly, however there is no | 259 // receive |stream| so the error propagates up correctly, however there is no |
| (...skipping 47 matching lines...) |
| 349 DCHECK(ContainsKey(jobs_, job)); | 307 DCHECK(ContainsKey(jobs_, job)); |
| 350 bound_job_.reset(job); | 308 bound_job_.reset(job); |
| 351 jobs_.erase(job); | 309 jobs_.erase(job); |
| 352 factory_->request_map_.erase(job); | 310 factory_->request_map_.erase(job); |
| 353 | 311 |
| 354 OrphanJobs(); | 312 OrphanJobs(); |
| 355 } | 313 } |
| 356 | 314 |
| 357 void HttpStreamFactoryImpl::Request::OrphanJobs() { | 315 void HttpStreamFactoryImpl::Request::OrphanJobs() { |
| 358 RemoveRequestFromSpdySessionRequestMap(); | 316 RemoveRequestFromSpdySessionRequestMap(); |
| 359 RemoveRequestFromHttpPipeliningRequestMap(); | |
| 360 | 317 |
| 361 std::set<Job*> tmp; | 318 std::set<Job*> tmp; |
| 362 tmp.swap(jobs_); | 319 tmp.swap(jobs_); |
| 363 | 320 |
| 364 for (std::set<Job*>::iterator it = tmp.begin(); it != tmp.end(); ++it) | 321 for (std::set<Job*>::iterator it = tmp.begin(); it != tmp.end(); ++it) |
| 365 factory_->OrphanJob(*it, this); | 322 factory_->OrphanJob(*it, this); |
| 366 } | 323 } |
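OrphanJobs swaps jobs_ into a local set before iterating, presumably so that a re-entrant call back into this Request from factory_->OrphanJob() cannot mutate the container mid-loop and invalidate the iterator. The pattern in isolation (a sketch; OrphanOneJob is a hypothetical stand-in for the factory callback):

```cpp
#include <set>

struct Job;

void OrphanOneJob(Job* job) { /* hypothetical: hand |job| back to its owner */ }

// Swap-before-iterate: take a stable snapshot so re-entrant mutation of the
// member set during the callbacks cannot invalidate this loop's iterators.
void OrphanAll(std::set<Job*>* jobs) {
  std::set<Job*> tmp;
  tmp.swap(*jobs);  // |*jobs| is now empty; |tmp| owns the old contents.
  for (std::set<Job*>::iterator it = tmp.begin(); it != tmp.end(); ++it)
    OrphanOneJob(*it);
}
```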
| 367 | 324 |
| 368 void HttpStreamFactoryImpl::Request::OnJobSucceeded(Job* job) { | 325 void HttpStreamFactoryImpl::Request::OnJobSucceeded(Job* job) { |
| 369 // |job| should only be NULL if we're being serviced by a late bound | 326 // |job| should only be NULL if we're being serviced by a late bound |
| 370 // SpdySession or HttpPipelinedConnection (one that was not created by a job | 327 // SpdySession (one that was not created by a job in our |jobs_| set). |
| 371 // in our |jobs_| set). | |
| 372 if (!job) { | 328 if (!job) { |
| 373 DCHECK(!bound_job_.get()); | 329 DCHECK(!bound_job_.get()); |
| 374 DCHECK(!jobs_.empty()); | 330 DCHECK(!jobs_.empty()); |
| 375 // NOTE(willchan): We do *NOT* call OrphanJobs() here. The reason is because | 331 // NOTE(willchan): We do *NOT* call OrphanJobs() here. The reason is because |
| 376 // we *WANT* to cancel the unnecessary Jobs from other requests if another | 332 // we *WANT* to cancel the unnecessary Jobs from other requests if another |
| 377 // Job completes first. | 333 // Job completes first. |
| 378 // TODO(mbelshe): Revisit this when we implement ip connection pooling of | 334 // TODO(mbelshe): Revisit this when we implement ip connection pooling of |
| 379 // SpdySessions. Do we want to orphan the jobs for a different hostname so | 335 // SpdySessions. Do we want to orphan the jobs for a different hostname so |
| 380 // they complete? Or do we want to prevent connecting a new SpdySession if | 336 // they complete? Or do we want to prevent connecting a new SpdySession if |
| 381 // we've already got one available for a different hostname where the ip | 337 // we've already got one available for a different hostname where the ip |
| 382 // address matches up? | 338 // address matches up? |
| 383 return; | 339 return; |
| 384 } | 340 } |
| 385 if (!bound_job_.get()) { | 341 if (!bound_job_.get()) { |
| 386 if (jobs_.size() > 1) | 342 if (jobs_.size() > 1) |
| 387 job->ReportJobSuccededForRequest(); | 343 job->ReportJobSuccededForRequest(); |
| 388 // We may have other jobs in |jobs_|. For example, if we start multiple jobs | 344 // We may have other jobs in |jobs_|. For example, if we start multiple jobs |
| 389 // for Alternate-Protocol. | 345 // for Alternate-Protocol. |
| 390 OrphanJobsExcept(job); | 346 OrphanJobsExcept(job); |
| 391 return; | 347 return; |
| 392 } | 348 } |
| 393 DCHECK(jobs_.empty()); | 349 DCHECK(jobs_.empty()); |
| 394 } | 350 } |
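OnJobSucceeded lands in one of three states; a compact restatement of the branch above (stand-in names, not real Chromium API):

```cpp
// Classify how a success is handled, mirroring the logic above.
enum SuccessPath {
  LATE_BOUND_SESSION,  // job == NULL: a SpdySession created elsewhere served
                       // us; keep jobs_ so a winner can cancel other racers.
  FIRST_JOB_WINS,      // no bound job yet: report the win (if it raced
                       // others) and orphan the remaining jobs.
  ALREADY_BOUND        // bound_job_ was set earlier; jobs_ must be empty.
};

SuccessPath ClassifySuccess(bool has_job, bool has_bound_job) {
  if (!has_job)
    return LATE_BOUND_SESSION;
  if (!has_bound_job)
    return FIRST_JOB_WINS;
  return ALREADY_BOUND;
}
```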
| 395 | 351 |
| 396 } // namespace net | 352 } // namespace net |