Chromium Code Reviews

Unified Diff: net/http/http_stream_factory_impl_request.cc

Issue 275953002: Remove HTTP pipelining support. (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fix line endings (created 6 years, 7 months ago)
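Note on the change below: the hunks delete the Request-side pipelining bookkeeping (SetHttpPipeliningKey(), RemoveRequestFromHttpPipeliningRequestMap(), and their call sites in ~Request() and OrphanJobs()), while the parallel SpdySession bookkeeping stays. Both sides follow the same key-to-waiting-requests registry pattern. The standalone sketch below illustrates only that pattern; Factory, Request, SetSessionKey() and the std::string key are simplified stand-ins for illustration, not the real Chromium classes.

// Sketch only: the key -> waiting-requests registry pattern, assuming just the
// behavior visible in SetSpdySessionKey() and
// RemoveRequestFromSpdySessionRequestMap() in the diff below.
#include <cassert>
#include <map>
#include <memory>
#include <set>
#include <string>

struct Factory;

struct Request {
  explicit Request(Factory* factory) : factory_(factory) {}
  ~Request();

  void SetSessionKey(const std::string& key);
  void RemoveFromSessionRequestMap();

  Factory* factory_;
  std::unique_ptr<std::string> session_key_;  // stand-in for SpdySessionKey
};

struct Factory {
  // Requests waiting on the same (hypothetical) session, grouped by key.
  std::map<std::string, std::set<Request*> > session_request_map_;
};

void Request::SetSessionKey(const std::string& key) {
  assert(!session_key_);  // a request registers under at most one key
  session_key_.reset(new std::string(key));
  std::set<Request*>& request_set = factory_->session_request_map_[key];
  assert(request_set.count(this) == 0);
  request_set.insert(this);
}

void Request::RemoveFromSessionRequestMap() {
  if (!session_key_)
    return;
  std::map<std::string, std::set<Request*> >::iterator it =
      factory_->session_request_map_.find(*session_key_);
  assert(it != factory_->session_request_map_.end());
  it->second.erase(this);
  if (it->second.empty())
    factory_->session_request_map_.erase(it);  // drop empty buckets
  session_key_.reset();
}

Request::~Request() {
  // Mirrors ~Request() in the diff: always unregister on destruction.
  RemoveFromSessionRequestMap();
}

int main() {
  Factory factory;
  {
    Request r1(&factory), r2(&factory);
    r1.SetSessionKey("example.org:443");
    r2.SetSessionKey("example.org:443");
    assert(factory.session_request_map_.size() == 1u);
  }  // both requests unregister themselves here
  assert(factory.session_request_map_.empty());
  return 0;
}

The two properties the DCHECKs in the real code enforce are the same ones the asserts check here: a request registers under at most one key, and an empty bucket is erased so the map does not accumulate dead keys.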
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "net/http/http_stream_factory_impl_request.h"

 #include "base/callback.h"
 #include "base/logging.h"
 #include "base/stl_util.h"
 #include "net/http/http_stream_factory_impl_job.h"
(...skipping 30 matching lines...)
     DCHECK(jobs_.empty());
   else
     DCHECK(!jobs_.empty());

   net_log_.EndEvent(NetLog::TYPE_HTTP_STREAM_REQUEST);

   for (std::set<Job*>::iterator it = jobs_.begin(); it != jobs_.end(); ++it)
     factory_->request_map_.erase(*it);

   RemoveRequestFromSpdySessionRequestMap();
-  RemoveRequestFromHttpPipeliningRequestMap();

   STLDeleteElements(&jobs_);
 }

 void HttpStreamFactoryImpl::Request::SetSpdySessionKey(
     const SpdySessionKey& spdy_session_key) {
   DCHECK(!spdy_session_key_.get());
   spdy_session_key_.reset(new SpdySessionKey(spdy_session_key));
   RequestSet& request_set =
       factory_->spdy_session_request_map_[spdy_session_key];
   DCHECK(!ContainsKey(request_set, this));
   request_set.insert(this);
 }

-bool HttpStreamFactoryImpl::Request::SetHttpPipeliningKey(
-    const HttpPipelinedHost::Key& http_pipelining_key) {
-  CHECK(!http_pipelining_key_.get());
-  http_pipelining_key_.reset(new HttpPipelinedHost::Key(http_pipelining_key));
-  bool was_new_key = !ContainsKey(factory_->http_pipelining_request_map_,
-                                  http_pipelining_key);
-  RequestVector& request_vector =
-      factory_->http_pipelining_request_map_[http_pipelining_key];
-  request_vector.push_back(this);
-  return was_new_key;
-}
-
 void HttpStreamFactoryImpl::Request::AttachJob(Job* job) {
   DCHECK(job);
   jobs_.insert(job);
   factory_->request_map_[job] = this;
 }

 void HttpStreamFactoryImpl::Request::Complete(
     bool was_npn_negotiated,
     NextProto protocol_negotiated,
     bool using_spdy,
(...skipping 36 matching lines...)
   OnJobSucceeded(job);
   delegate_->OnWebSocketHandshakeStreamReady(
       used_ssl_config, used_proxy_info, stream);
 }

 void HttpStreamFactoryImpl::Request::OnStreamFailed(
     Job* job,
     int status,
     const SSLConfig& used_ssl_config) {
   DCHECK_NE(OK, status);
-  // |job| should only be NULL if we're being canceled by a late bound
-  // HttpPipelinedConnection (one that was not created by a job in our |jobs_|
-  // set).
-  if (!job) {
-    DCHECK(!bound_job_.get());
-    DCHECK(!jobs_.empty());
-    // NOTE(willchan): We do *NOT* call OrphanJobs() here. The reason is because
-    // we *WANT* to cancel the unnecessary Jobs from other requests if another
-    // Job completes first.
-  } else if (!bound_job_.get()) {
+  DCHECK(job);
+  if (!bound_job_.get()) {
     // Hey, we've got other jobs! Maybe one of them will succeed, let's just
     // ignore this failure.
     if (jobs_.size() > 1) {
       jobs_.erase(job);
       factory_->request_map_.erase(job);
       // Notify all the other jobs that this one failed.
       for (std::set<Job*>::iterator it = jobs_.begin(); it != jobs_.end(); ++it)
         (*it)->MarkOtherJobComplete(*job);
       delete job;
       return;
(...skipping 110 matching lines...)
     RequestSet& request_set =
         spdy_session_request_map[*spdy_session_key_];
     DCHECK(ContainsKey(request_set, this));
     request_set.erase(this);
     if (request_set.empty())
       spdy_session_request_map.erase(*spdy_session_key_);
     spdy_session_key_.reset();
   }
 }

-void
-HttpStreamFactoryImpl::Request::RemoveRequestFromHttpPipeliningRequestMap() {
-  if (http_pipelining_key_.get()) {
-    HttpPipeliningRequestMap& http_pipelining_request_map =
-        factory_->http_pipelining_request_map_;
-    DCHECK(ContainsKey(http_pipelining_request_map, *http_pipelining_key_));
-    RequestVector& request_vector =
-        http_pipelining_request_map[*http_pipelining_key_];
-    for (RequestVector::iterator it = request_vector.begin();
-         it != request_vector.end(); ++it) {
-      if (*it == this) {
-        request_vector.erase(it);
-        break;
-      }
-    }
-    if (request_vector.empty())
-      http_pipelining_request_map.erase(*http_pipelining_key_);
-    http_pipelining_key_.reset();
-  }
-}
-
 void HttpStreamFactoryImpl::Request::OnNewSpdySessionReady(
     Job* job,
     scoped_ptr<HttpStream> stream,
     const base::WeakPtr<SpdySession>& spdy_session,
     bool direct) {
   DCHECK(job);
   DCHECK(job->using_spdy());

   // Note: |spdy_session| may be NULL. In that case, |delegate_| should still
   // receive |stream| so the error propogates up correctly, however there is no
(...skipping 47 matching lines...)
   DCHECK(ContainsKey(jobs_, job));
   bound_job_.reset(job);
   jobs_.erase(job);
   factory_->request_map_.erase(job);

   OrphanJobs();
 }

 void HttpStreamFactoryImpl::Request::OrphanJobs() {
   RemoveRequestFromSpdySessionRequestMap();
-  RemoveRequestFromHttpPipeliningRequestMap();

   std::set<Job*> tmp;
   tmp.swap(jobs_);

   for (std::set<Job*>::iterator it = tmp.begin(); it != tmp.end(); ++it)
     factory_->OrphanJob(*it, this);
 }

 void HttpStreamFactoryImpl::Request::OnJobSucceeded(Job* job) {
-  // |job| should only be NULL if we're being serviced by a late bound
-  // SpdySession or HttpPipelinedConnection (one that was not created by a job
-  // in our |jobs_| set).
+  // |job| should only be NULL if we're being serviced by a late bound
+  // SpdySession (one that was not created by a job in our |jobs_| set).
   if (!job) {
     DCHECK(!bound_job_.get());
     DCHECK(!jobs_.empty());
     // NOTE(willchan): We do *NOT* call OrphanJobs() here. The reason is because
     // we *WANT* to cancel the unnecessary Jobs from other requests if another
     // Job completes first.
     // TODO(mbelshe): Revisit this when we implement ip connection pooling of
     // SpdySessions. Do we want to orphan the jobs for a different hostname so
     // they complete? Or do we want to prevent connecting a new SpdySession if
     // we've already got one available for a different hostname where the ip
(...skipping 11 matching lines...)
     }
     // We may have other jobs in |jobs_|. For example, if we start multiple jobs
     // for Alternate-Protocol.
     OrphanJobsExcept(job);
     return;
   }
   DCHECK(jobs_.empty());
 }

 }  // namespace net
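One detail worth noting in OrphanJobs() above: jobs_ is swapped into a local set before iterating, so any re-entrant modification of jobs_ triggered by factory_->OrphanJob() cannot invalidate the loop. A minimal sketch of that swap-before-iterate idiom follows; Job, Request, and OrphanJob() here are simplified stand-ins, not the real Chromium types.

#include <iostream>
#include <set>

struct Job {
  int id;
};

struct Request {
  std::set<Job*> jobs_;

  // Stand-in for factory_->OrphanJob(*it, this) in the real code: here we
  // just report which job would be orphaned.
  void OrphanJob(Job* job) { std::cout << "orphaning job " << job->id << "\n"; }

  void OrphanJobs() {
    std::set<Job*> tmp;
    tmp.swap(jobs_);  // jobs_ is now empty; tmp holds a stable snapshot
    for (std::set<Job*>::iterator it = tmp.begin(); it != tmp.end(); ++it)
      OrphanJob(*it);
  }
};

int main() {
  Job a = {1};
  Job b = {2};
  Request request;
  request.jobs_.insert(&a);
  request.jobs_.insert(&b);
  request.OrphanJobs();
  std::cout << "jobs_ left after orphaning: " << request.jobs_.size() << "\n";
  return 0;
}

After the swap the member set is already empty, which matches the state the surrounding code expects once the jobs have been handed over to the factory.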