Chromium Code Reviews| Index: common/chunkstream/buffer.go |
| diff --git a/common/chunkstream/buffer.go b/common/chunkstream/buffer.go |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..a5d0e213516e7bc5a0f7a2716cc815b196018a3c |
| --- /dev/null |
| +++ b/common/chunkstream/buffer.go |
| @@ -0,0 +1,158 @@ |
| +// Copyright 2015 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +package chunkstream |
| + |
// Buffer is a collection of ordered Chunks that can cheaply be read and
// shifted as if it were a continuous byte stream.
//
// A Buffer is not goroutine-safe. However, data can be Append()ed to a Buffer
// without affecting Readers that were previously generated from it so long as
// no data is Consume()d from the Buffer. The act of consuming data invalidates
// any Reader state, and using a Reader constructed prior to a Consume() call
// after the Consume() call is completed is an error and may result in a panic.
type Buffer struct {
	// first is a pointer to the first Chunk node in the buffer, or nil if the
	// buffer is empty.
	first *chunkNode
	// last is a pointer to the last Chunk node in the buffer, or nil if the
	// buffer is empty.
	last *chunkNode

	// size is the total number of unconsumed bytes in the Buffer.
	size int64

	// fidx is the current byte offset into first; bytes before this offset
	// have already been consumed.
	fidx int

	// gen is a monotonically-increasing generation number. It increases every
	// time data is consumed from the Buffer via Consume, and is recorded by
	// Readers created from this Buffer so that stale Reader use can be
	// detected.
	gen int64
}
| + |
| +// Append adds another Chunk to the buffer. |
|
iannucci
2015/11/05 01:10:07
another Chunk -> one or more Chunks
dnj
2015/11/13 23:22:03
Done.
|
| +// |
| +// After completion, the Chunk is now owned by the Buffer and should not be used |
| +// anymore externally. |
| +func (b *Buffer) Append(c ...Chunk) { |
| + for _, chunk := range c { |
| + b.appendChunk(chunk) |
| + } |
| +} |
| + |
| +func (b *Buffer) appendChunk(c Chunk) { |
| + // Ignore/discard zero-length data. |
| + if c.Len() == 0 { |
| + c.Release() |
| + return |
| + } |
| + |
| + cn := newChunkNode(c) |
| + cn.next = nil |
| + if b.last == nil { |
| + // First node. |
| + b.first = cn |
| + } else { |
| + b.last.next = cn |
| + } |
| + b.last = cn |
| + b.size += int64(c.Len()) |
| +} |
| + |
| +// Bytes constructs a byte slice containing the contents of the Buffer. |
| +// |
| +// This is a potentially expensive operation, and should generally be used only |
| +// for debugging and tests, as it defeats most of the purpose of this package. |
| +func (b *Buffer) Bytes() []byte { |
| + if b.Len() == 0 { |
| + return nil |
| + } |
| + |
| + m := make([]byte, 0, b.Len()) |
| + idx := b.fidx |
| + for cur := b.first; cur != nil; cur = cur.next { |
| + m = append(m, cur.Bytes()[idx:]...) |
| + idx = 0 |
| + } |
| + return m |
| +} |
| + |
| +// Len returns the total amount of data in the buffer. |
| +func (b *Buffer) Len() int64 { |
| + return b.size |
| +} |
| + |
| +// FirstChunk returns the first Chunk in the Buffer, or nil if the Buffer has |
| +// no Chunks. |
| +func (b *Buffer) FirstChunk() Chunk { |
| + if b.first == nil { |
| + return nil |
| + } |
| + return b.first.Chunk |
| +} |
| + |
| +// Reader returns a Reader instance bound to this Buffer. |
| +// |
| +// If the limit is greater than zero, the Reader's limit will be set. |
|
iannucci
2015/11/05 01:10:07
I think this docstring is stale.
dnj
2015/11/13 23:22:03
Done.
|
| +// |
| +// The Reader is no longer valid after Consume is called. |
| +func (b *Buffer) Reader() *Reader { |
| + return b.ReaderLimit(b.size) |
| +} |
| + |
| +// ReaderLimit constructs a Reader instance, but artifically constrains it to |
| +// read at most the specified number of bytes. |
| +// |
| +// This is useful when reading a subset of the data into a Buffer, as ReadFrom |
| +// does not allow a size to be specified. |
| +func (b *Buffer) ReaderLimit(limit int64) *Reader { |
| + if limit > b.size { |
| + limit = b.size |
|
iannucci
2015/11/05 01:10:07
hm.. why? what if you plan to Append stuff, since
dnj
2015/11/13 23:22:03
Clearer with "View", but it's basically a snapshot
|
| + } |
| + |
| + return &Reader{ |
| + cur: b.first, |
| + cidx: b.fidx, |
| + size: limit, |
| + |
| + gen: b.gen, |
| + b: b, |
| + } |
| +} |
| + |
| +// Consume removes the specified number of bytes from the beginning of the |
| +// Buffer. If Consume skips past all of the data in a Chunk is no longer needed, |
| +// it is Release()d. |
| +// |
| +// Consume invalidates any current Reader instances. Using a Reader generated |
| +// from before a Consume call is an error and will result in a panic. |
|
iannucci
2015/11/05 01:10:07
nice! glad to see this is enforced with gen :)
dnj
2015/11/13 23:22:03
Removed gen. :(
|
| +func (b *Buffer) Consume(c int64) { |
| + if c == 0 { |
| + return |
| + } |
| + |
| + if c > b.size { |
| + panic("consuming more data than available") |
| + } |
| + b.size -= c |
| + |
| + for c > 0 { |
| + // Do we consume the entire chunk? |
| + if int64(b.first.Len()-b.fidx) > c { |
| + // No. Advance our chunk index and terminate. |
| + b.fidx += int(c) |
| + break |
| + } |
| + |
| + n := b.first |
| + c -= int64(n.Len() - b.fidx) |
| + b.first = n.next |
| + b.fidx = 0 |
| + if b.first == nil { |
| + b.last = nil |
| + } |
| + |
| + // Release our node. We must not reference it after this. |
| + n.release() |
| + } |
| + b.gen++ |
| +} |