Index: net/http/http_response_body_drainer.cc
diff --git a/net/http/http_response_body_drainer.cc b/net/http/http_response_body_drainer.cc
index aefb18026528f2b90307349fc153e15cbdd7d3c6..bbd27276b1711dafa2b7f12ba563ff454255fe5e 100644
--- a/net/http/http_response_body_drainer.cc
+++ b/net/http/http_response_body_drainer.cc
@@ -25,7 +25,25 @@ HttpResponseBodyDrainer::HttpResponseBodyDrainer(HttpStream* stream)
 HttpResponseBodyDrainer::~HttpResponseBodyDrainer() {}
 
 void HttpResponseBodyDrainer::Start(HttpNetworkSession* session) {
-  read_buf_ = new IOBuffer(kDrainBodyBufferSize);
+  StartWithSize(session, kDrainBodyBufferSize);
+}
+
+void HttpResponseBodyDrainer::StartWithSize(HttpNetworkSession* session,
+                                            int num_bytes_to_drain) {
+  DCHECK_LE(0, num_bytes_to_drain);
+  // TODO(simonjam): Consider raising this limit if we're pipelining. If we have
+  // a bunch of responses in the pipeline, we should be less willing to give up
+  // while draining.
+  if (num_bytes_to_drain > kDrainBodyBufferSize) {
+    Finish(ERR_RESPONSE_BODY_TOO_BIG_TO_DRAIN);
+    return;
+  } else if (num_bytes_to_drain == 0) {
+    Finish(OK);
+    return;
+  }
+
+  read_size_ = num_bytes_to_drain;
+  read_buf_ = new IOBuffer(read_size_);
   next_state_ = STATE_DRAIN_RESPONSE_BODY;
   int rv = DoLoop(OK);
 
@@ -71,7 +89,7 @@ int HttpResponseBodyDrainer::DoDrainResponseBody() {
   next_state_ = STATE_DRAIN_RESPONSE_BODY_COMPLETE;
   return stream_->ReadResponseBody(
-      read_buf_, kDrainBodyBufferSize - total_read_,
+      read_buf_, read_size_ - total_read_,
       &io_callback_);
 }
 