Chromium Code Reviews| Index: net/quic/quic_stream_sequencer.cc |
| diff --git a/net/quic/quic_stream_sequencer.cc b/net/quic/quic_stream_sequencer.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..5baa3ec2aec2b61b590fc714d7260fe4e604bb72 |
| --- /dev/null |
| +++ b/net/quic/quic_stream_sequencer.cc |
| @@ -0,0 +1,263 @@ |
| +// Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "net/quic/quic_stream_sequencer.h" |
| + |
| +#include <algorithm> |
| +#include <limits> |
| + |
| +#include "base/logging.h" |
| +#include "net/quic/reliable_quic_stream.h" |
| + |
| +using std::min; |
| +using std::numeric_limits; |
| + |
| +namespace net { |
| + |
// Constructs a sequencer for |quic_stream| with an effectively unlimited
// frame-memory budget (size_t max).  close_offset_ is initialized to the
// QuicStreamOffset max as a "no close scheduled" sentinel (see
// CloseStreamAtOffset), and streams default to half-close semantics.
QuicStreamSequencer::QuicStreamSequencer(ReliableQuicStream* quic_stream)
    : stream_(quic_stream),
      num_bytes_consumed_(0),
      max_frame_memory_(numeric_limits<size_t>::max()),
      close_offset_(numeric_limits<QuicStreamOffset>::max()),
      half_close_(true) {
}
| + |
// Constructs a sequencer for |quic_stream| that will buffer at most
// |max_frame_memory| bytes of out-of-order frame data.  A budget smaller
// than kMaxPacketSize is a programming error: frames up to that size can
// arrive and would be unbufferable (see WillAcceptStreamFrame), so we
// DFATAL rather than fail silently.
QuicStreamSequencer::QuicStreamSequencer(size_t max_frame_memory,
                                         ReliableQuicStream* quic_stream)
    : stream_(quic_stream),
      num_bytes_consumed_(0),
      max_frame_memory_(max_frame_memory),
      close_offset_(numeric_limits<QuicStreamOffset>::max()),
      half_close_(true) {
  if (max_frame_memory < kMaxPacketSize) {
    LOG(DFATAL) << "Setting max frame memory to " << max_frame_memory
                << ". Some frames will be impossible to handle.";
  }
}
| + |
// Default destruction: frames_ (std::map of std::string) cleans itself up,
// and stream_ is not owned by the sequencer.
QuicStreamSequencer::~QuicStreamSequencer() {
}
| + |
// Returns true if OnStreamFrame() would accept |frame|: the frame is not a
// duplicate, fits in the memory budget, and starts at or beyond the consumed
// offset.  Has no side effects, so callers may use it to decide whether to
// ack before actually delivering the frame.
bool QuicStreamSequencer::WillAcceptStreamFrame(
    const QuicStreamFrame& frame) const {
  QuicStreamOffset byte_offset = frame.offset;
  size_t data_len = frame.data.size();
  DCHECK_LE(data_len, max_frame_memory_);

  // Reject data we have already consumed, or a duplicate of a frame we have
  // already buffered at this exact offset.
  if (byte_offset < num_bytes_consumed_ ||
      frames_.find(byte_offset) != frames_.end()) {
    return false;
  }
  if (data_len > max_frame_memory_) {
    // We're never going to buffer this frame, and we can't pass it up: the
    // stream might only consume part of it and we'd need a partial ack.
    //
    // Ideally this should never happen, as we check that
    // max_frame_memory_ > kMaxPacketSize and lower levels should reject
    // frames larger than that.
    return false;
  }
  // NOTE(review): this treats (end offset - consumed offset) as the memory
  // this frame would pin, which over-counts when earlier gaps exist but is a
  // safe upper bound on buffered bytes.
  if (byte_offset + data_len - num_bytes_consumed_ > max_frame_memory_) {
    // We can buffer this but not right now. Toss it.
    // It might be worth trying an experiment where we try best-effort buffering
    return false;
  }
  return true;
}
| + |
// Accepts |frame| into the sequencer.  If the frame is the next in-order
// data, it is delivered to the stream immediately (and any now-contiguous
// buffered frames are flushed); otherwise, or if the stream consumes only
// part of it, the remainder is buffered keyed by offset.  Returns true if
// the frame was accepted (and is therefore safe to ack), false on rejection
// or on a stream processing error.
bool QuicStreamSequencer::OnStreamFrame(const QuicStreamFrame& frame) {
  QuicStreamOffset byte_offset = frame.offset;
  const char* data = frame.data.data();
  size_t data_len = frame.data.size();

  if (!WillAcceptStreamFrame(frame)) {
    // This should not happen, as WillAcceptStreamFrame should be called
    // before OnStreamFrame. Error handling should be done by the caller.
    return false;
  }

  if (byte_offset == num_bytes_consumed_) {
    // In-order data: hand it straight to the stream.
    DVLOG(1) << "Processing byte offset " << byte_offset;
    size_t bytes_consumed = stream_->ProcessData(data, data_len);
    num_bytes_consumed_ += bytes_consumed;

    if (MaybeCloseStream()) {
      return true;
    }
    if (bytes_consumed > data_len) {
      // The stream claimed to consume more than it was given: fatal error.
      stream_->Close(QUIC_SERVER_ERROR_PROCESSING_STREAM);
      return false;
    } else if (bytes_consumed == data_len) {
      // Fully consumed; newly contiguous buffered frames can now be flushed.
      FlushBufferedFrames();
      return true;  // it's safe to ack this frame.
    } else {
      // Set ourselves up to buffer what's left
      data_len -= bytes_consumed;
      data += bytes_consumed;
      byte_offset += bytes_consumed;
    }
  }

  // Out-of-order (or partially consumed) data: buffer a copy keyed by its
  // stream offset until the gap before it is filled.
  DVLOG(1) << "Buffering packet at offset " << byte_offset;
  frames_.insert(make_pair(byte_offset, string(data, data_len)));
  return true;
}
| + |
| +void QuicStreamSequencer::CloseStreamAtOffset(QuicStreamOffset offset, |
| + bool half_close) { |
| + const QuicStreamOffset kMaxOffset = numeric_limits<QuicStreamOffset>::max(); |
| + |
| + // If we have a scheduled termination or close, any new offset should match |
| + // it. |
| + if (close_offset_ != kMaxOffset && offset != close_offset_) { |
| + stream_->Close(QUIC_MULTIPLE_TERMINATION_OFFSETS); |
| + return; |
| + } |
| + |
| + close_offset_ = offset; |
| + // Full close overrides half close. |
| + if (half_close == false) { |
| + half_close_ = false; |
| + } |
| + |
| + MaybeCloseStream(); |
| +} |
| + |
| +bool QuicStreamSequencer::MaybeCloseStream() { |
| + if (IsHalfClosed()) { |
| + DVLOG(1) << "Passing up termination, as we've processed " |
| + << num_bytes_consumed_ << " of " << close_offset_ |
| + << " bytes."; |
| + // Technically it's an error if num_bytes_consumed isn't exactly |
| + // equal, but error handling seems silly at this point. |
| + stream_->TerminateFromPeer(half_close_); |
| + return true; |
| + } |
| + return false; |
| +} |
| + |
| +size_t QuicStreamSequencer::GetReadableRegions(iovec* iov, size_t iov_len) { |
| + FrameMap::iterator it = frames_.begin(); |
| + size_t idx = 0; |
|
jar (doing other things)
2012/10/31 22:37:37
nit: idx --> index
Ryan Hamilton
2012/11/01 22:52:20
Fixed upstream.
|
| + QuicStreamOffset offset = num_bytes_consumed_; |
| + while (it != frames_.end() && idx < iov_len) { |
| + if (it->first != offset) return idx; |
| + |
| + iov[idx].iov_base = static_cast<void*>( |
| + const_cast<char*>(it->second.c_str())); |
|
jar (doing other things)
2012/10/31 22:37:37
Should this be second.data()? I think the other c
alyssar
2012/11/01 17:43:47
+1 to data() just for clarity
Ryan Hamilton
2012/11/01 22:52:20
Fixed upstream.
|
| + iov[idx].iov_len = it->second.size(); |
| + offset += it->second.size(); |
| + |
| + ++idx; |
| + ++it; |
| + } |
| + return idx; |
| +} |
| + |
| +void QuicStreamSequencer::AdvanceReadablePtr(size_t data_read) { |
| + FrameMap::iterator it = frames_.begin(); |
|
jar (doing other things)
2012/10/31 22:37:37
nit: putting this after line 161 will mean that yo
Ryan Hamilton
2012/11/01 22:52:20
Fixed upstream.
|
| + |
| + while (data_read) { |
| + if (it->first != num_bytes_consumed_ || it == frames_.end()) { |
|
jar (doing other things)
2012/10/31 22:37:37
Shouldn't you first test for frames_.end()?
Ryan Hamilton
2012/11/01 22:52:20
Fixed upstream.
|
| + stream_->Close(QUIC_SERVER_ERROR_PROCESSING_STREAM); // Programming error |
| + return; |
| + } |
| + |
| + if (data_read >= it->second.size()) { |
| + data_read -= it->second.size(); |
| + num_bytes_consumed_ += it->second.size(); |
| + frames_.erase(it); |
| + it = frames_.begin(); |
| + } else { |
| + frames_.insert(make_pair(it->first + data_read, |
| + it->second.substr(data_read))); |
| + frames_.erase(frames_.begin()); |
| + num_bytes_consumed_ += data_read; |
| + data_read = 0; |
| + } |
| + } |
| +} |
| + |
| +size_t QuicStreamSequencer::Readv(const struct iovec* iov, size_t iov_len) { |
| + FrameMap::iterator it = frames_.begin(); |
| + size_t iov_idx = 0; |
|
jar (doing other things)
2012/10/31 22:37:37
nit: iov_index? (I found other folks using the "
Ryan Hamilton
2012/11/01 22:52:20
Fixed upstream.
|
| + size_t iov_offset = 0; |
| + size_t frame_offset = 0; |
| + size_t initial_bytes_consumed = num_bytes_consumed_; |
| + |
| + while (iov_idx < iov_len && |
| + it != frames_.end() && |
| + it->first == num_bytes_consumed_) { |
| + size_t bytes_to_read = min(iov[iov_idx].iov_len - iov_offset, |
| + it->second.size() - frame_offset); |
| + |
| + char* iov_ptr = static_cast<char*>(iov[iov_idx].iov_base) + iov_offset; |
|
jar (doing other things)
2012/10/31 22:37:37
This has me a bit scared. Some of the iov[].iov_b
alyssar
2012/11/01 17:43:47
I think there's some confusion about the memory mo
alyssar
2012/11/01 17:43:47
I think there's some confusion about the memory mo
Ryan Hamilton
2012/11/01 22:52:20
Changed to use data().
|
| + memcpy(iov_ptr, |
| + it->second.c_str() + frame_offset, bytes_to_read); |
| + frame_offset += bytes_to_read; |
| + iov_offset += bytes_to_read; |
| + |
| + if (iov[iov_idx].iov_len == iov_offset) { |
| + // We've filled this buffer. |
| + iov_offset = 0; |
| + ++iov_idx; |
| + } |
| + if (it->second.size() == frame_offset) { |
| + // We've copied this whole frame |
| + num_bytes_consumed_ += it->second.size(); |
| + frames_.erase(it); |
| + it = frames_.begin(); |
| + frame_offset = 0; |
| + } |
| + } |
| + // We've finished copying. If we have a partial frame, update it. |
| + if (frame_offset != 0) { |
| + frames_.insert(make_pair(it->first + frame_offset, |
| + it->second.substr(frame_offset))); |
|
jar (doing other things)
2012/10/31 22:37:37
nit: indent
Ryan Hamilton
2012/11/01 22:52:20
Fixed upstream.
|
| + frames_.erase(frames_.begin()); |
| + num_bytes_consumed_ += frame_offset; |
| + } |
| + return num_bytes_consumed_ - initial_bytes_consumed; |
|
jar (doing other things)
2012/10/31 22:37:37
The header says this should return the number of i
alyssar
2012/11/01 17:43:47
I believe the header should be updated. This is s
Ryan Hamilton
2012/11/01 22:52:20
Fixed upstream.
|
| +} |
| + |
| +bool QuicStreamSequencer::HasBytesToRead() { |
| + FrameMap::iterator it = frames_.begin(); |
| + |
| + return it != frames_.end() && it->first == num_bytes_consumed_; |
| +} |
| + |
// True once every byte up to the scheduled close offset has been consumed.
// While no close is scheduled, close_offset_ is the QuicStreamOffset max,
// so this is false.
bool QuicStreamSequencer::IsHalfClosed() {
  return num_bytes_consumed_ >= close_offset_;
}
| + |
| +bool QuicStreamSequencer::IsClosed() { |
| + return num_bytes_consumed_ >= close_offset_ && half_close_ == false; |
| +} |
| + |
// Delivers buffered frames to the stream for as long as they are contiguous
// with the consumed offset and the stream keeps accepting data.  A frame the
// stream only partially consumes is re-inserted, re-keyed at the new
// consumed offset, and flushing stops.
void QuicStreamSequencer::FlushBufferedFrames() {
  FrameMap::iterator it = frames_.find(num_bytes_consumed_);
  while (it != frames_.end()) {
    DVLOG(1) << "Flushing buffered packet at offset " << it->first;
    string* data = &it->second;
    size_t bytes_consumed = stream_->ProcessData(data->c_str(), data->size());
    num_bytes_consumed_ += bytes_consumed;
    if (MaybeCloseStream()) {
      return;
    }
    if (bytes_consumed > data->size()) {
      // The stream claimed to consume more than it was given.
      stream_->Close(QUIC_SERVER_ERROR_PROCESSING_STREAM);  // Programming error
      return;
    } else if (bytes_consumed == data->size()) {
      // Whole frame delivered; look for the next contiguous one.
      frames_.erase(it);
      it = frames_.find(num_bytes_consumed_);
    } else {
      // Partial consume: copy the tail out before erasing, then re-insert it
      // keyed at the new consumed offset, and stop flushing.
      string new_data = it->second.substr(bytes_consumed);
      frames_.erase(it);
      frames_.insert(make_pair(num_bytes_consumed_, new_data));
      return;
    }
  }
}
| + |
| +} // namespace net |