| Index: media/blink/multibuffer.h
|
| diff --git a/media/blink/multibuffer.h b/media/blink/multibuffer.h
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..3e51641b2e635837ee629be829e8b5798e54df86
|
| --- /dev/null
|
| +++ b/media/blink/multibuffer.h
|
| @@ -0,0 +1,376 @@
|
| +// Copyright 2015 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#ifndef MEDIA_BLINK_MULTIBUFFER_H_
|
| +#define MEDIA_BLINK_MULTIBUFFER_H_
|
| +
|
| +#include <stdint.h>
|
| +
|
| +#include <limits>
|
| +#include <map>
|
| +#include <set>
|
| +
|
| +#include "base/callback.h"
|
| +#include "base/memory/ref_counted.h"
|
| +#include "media/base/data_buffer.h"
|
| +#include "media/blink/lru.h"
|
| +#include "media/blink/rangemap.h"
|
| +#include "media/blink/url_index.h"
|
| +
|
| +namespace media {
|
| +
|
| +typedef scoped_refptr<UrlData> MultiBufferUrlData;
|
| +typedef int32_t MultiBufferBlockNum;
|
| +
|
| +const int kMaxFreesPerAdd = 10;
|
| +
|
| +// There is a simple logic for creating, destroying and deferring
|
| +// data providers. Every data provider has a look-ahead region and
|
| +// a look-behind region. If there are readers in the look-ahead
|
| +// region, we keep reading. If not, but there are readers in the
|
| +// look-behind region, we defer. If there are no readers in either
|
| +// region, we destroy the data provider.
|
| +
|
| +// When new readers are added, new data providers are created if
|
| +// the new reader doesn't fall into the look-ahead region of
|
| +// an existing data provider.
|
| +
|
| +// This is the size of the look-ahead region.
|
| +const int kMaxWaitForWriterOffset = 5;
|
| +
|
| +// This is the size of the look-behind region.
|
| +const int kMaxWaitForReaderOffset = 50;
|
| +
|
| +#define kUnknownUrlData nullptr
|
| +
|
| +// This is the key for the multibuffer cache, it's made up of a
|
| +// refcounted UrlData pointer and a block ID.
|
| +// Uninitialized MultiBufferBlockIds will have a url equal to
|
| +// kUnknownUrlData. We also use kUnknownUrlData together with
|
| +// a block id of maxint/minint to represent the maximum and
|
| +// minimum possible MultiBufferBlockId. Examples, in sorted
|
| +// order:
|
| +// { url=kUnknownUrlData, block_num=-maxint } // minimum possible value
|
| +// { url=kUnknownUrlData, block_num=0 } // uninitialized
|
| +// { url=valid, block_num=1 } // valid value
|
| +// { url=valid, block_num=10 } // valid value
|
| +// { url=kUnknownUrlData, block_num=maxint } // maximum possible value
|
| +class MultiBufferBlockId {
|
| + public:
|
| + MultiBufferBlockId();
|
| + MultiBufferBlockId(const MultiBufferBlockId& block_id);
|
| + MultiBufferBlockId(MultiBufferUrlData url_data,
|
| + MultiBufferBlockNum block_num);
|
| + ~MultiBufferBlockId();
|
| + bool SameUrl(const MultiBufferBlockId& other) const {
|
| + return url_data_ == other.url_data_;
|
| + }
|
| + bool operator<(const MultiBufferBlockId& other) const {
|
| + if (SameUrl(other)) {
|
| + return block_num_ < other.block_num_;
|
| + }
|
| + return url_data_cmp() < other.url_data_cmp();
|
| + }
|
| + bool operator>(const MultiBufferBlockId& other) const {
|
| + if (SameUrl(other)) {
|
| + return block_num_ > other.block_num_;
|
| + }
|
| + return url_data_cmp() > other.url_data_cmp();
|
| + }
|
| + bool operator<=(const MultiBufferBlockId& other) const {
|
| + if (SameUrl(other)) {
|
| + return block_num_ <= other.block_num_;
|
| + }
|
| + return url_data_cmp() <= other.url_data_cmp();
|
| + }
|
| + bool operator>=(const MultiBufferBlockId& other) const {
|
| + if (SameUrl(other)) {
|
| + return block_num_ >= other.block_num_;
|
| + }
|
| + return url_data_cmp() >= other.url_data_cmp();
|
| + }
|
| + bool operator==(const MultiBufferBlockId& other) const {
|
| + return SameUrl(other) && block_num_ == other.block_num_;
|
| + }
|
| + bool operator!=(const MultiBufferBlockId& other) const {
|
| + return !SameUrl(other) || block_num_ != other.block_num_;
|
| + }
|
| + int64_t operator-(const MultiBufferBlockId& other) const {
|
| + if (SameUrl(other)) {
|
| + return block_num_ - other.block_num_;
|
| + }
|
| + // Blocks from different URLs are "infinitely" distant.
|
| + return std::numeric_limits<int64_t>::max();
|
| + }
|
| + MultiBufferBlockId operator-(int32_t n) const {
|
| + return MultiBufferBlockId(url_data_, block_num_ - n);
|
| + }
|
| + MultiBufferBlockId operator+(int32_t n) const {
|
| + return MultiBufferBlockId(url_data_, block_num_ + n);
|
| + }
|
| +
|
| + void operator++() {
|
| + block_num_++;
|
| + }
|
| +
|
| + void operator--() {
|
| + block_num_--;
|
| + }
|
| +
|
| + MultiBufferUrlData url_data() const { return url_data_; }
|
| + MultiBufferBlockNum block_num() const { return block_num_; }
|
| +
|
| + private:
|
| + // We represent the minimum block ID as {0, 0} and the
|
| +  // maximum as {0, max_uint64}. This helper function lets
|
| + // us compare the URLs and get the sort order right.
|
| + uint64_t url_data_cmp() const {
|
| + DCHECK_GE(sizeof(uint64_t), sizeof(UrlData*));
|
| + if (url_data_) return reinterpret_cast<uint64_t>(url_data_.get());
|
| + if (block_num_ == std::numeric_limits<MultiBufferBlockNum>::max())
|
| + return std::numeric_limits<uint64_t>::max();
|
| + return 0;
|
| + }
|
| + MultiBufferUrlData url_data_;
|
| + MultiBufferBlockNum block_num_;
|
| +};
|
| +
|
| +} // namespace media
|
| +
|
| +namespace std {
|
| +ostream& operator<<(ostream& o, const media::MultiBufferBlockId& id);
|
| +
|
| +template<>
|
| +class numeric_limits<media::MultiBufferBlockId> {
|
| + public:
|
| + static media::MultiBufferBlockId min() {
|
| + return media::MultiBufferBlockId(
|
| + kUnknownUrlData,
|
| + numeric_limits<media::MultiBufferBlockNum>::min());
|
| + }
|
| +
|
| + static media::MultiBufferBlockId max() {
|
| + return media::MultiBufferBlockId(
|
| + kUnknownUrlData,
|
| + numeric_limits<media::MultiBufferBlockNum>::max());
|
| + }
|
| +};
|
| +
|
| +} // namespace std
|
| +
|
| +namespace media {
|
| +
|
| +// MultiBuffers are multi-reader multi-writer cache/buffers with
|
| +// prefetching and pinning. Data is stored internally in ref-counted
|
| +// blocks of identical size. |block_size_shift| is log2 of the block
|
| +// size.
|
| +//
|
| +// Users should inherit this class and implement CreateWriter().
|
| +// TODO(hubbe): Make the multibuffer respond to memory pressure.
|
| +class MultiBuffer {
|
| + public:
|
| +
|
| + // Interface for clients wishing to read data out of this cache.
|
| + class Reader {
|
| + public:
|
| + Reader() {}
|
| + virtual ~Reader() {}
|
| + // Tell the reader that we have a new UrlData. There may be several
|
| + // reasons for this:
|
| + // o We encountered a redirect.
|
| +    //    o An error occurred, in this case the new_url_data will be
|
| + // kUnknownUrlData
|
| + // o We discovered a better UrlData. This happens when expired data
|
| + // exists in the cache, but the expired data was found to be still
|
| + // valid after sending the first request.
|
| +    // In most cases, the reader will simply update its internal position
|
| + // and keep reading. (This usually involves calling
|
| + // RemoveReader(old position), AddReader(new position)), but in the case of
|
| + // an error, the reader will either retry or give up and go away.
|
| + virtual void UpdateUrlData(const MultiBufferUrlData& old_url_data,
|
| + const MultiBufferUrlData& new_url_data) = 0;
|
| +
|
| + // Notifies the reader that the range of available blocks has changed.
|
| + // The reader must call MultiBuffer::Observe() to activate this callback.
|
| + virtual void NotifyAvailableRange(
|
| + const Range<MultiBufferBlockId>& range) = 0;
|
| + private:
|
| + DISALLOW_COPY_AND_ASSIGN(Reader);
|
| + };
|
| +
|
| + // DataProvider is the interface that MultiBuffer
|
| + // uses to get data into the cache.
|
| + class DataProvider {
|
| + public:
|
| + virtual ~DataProvider() {}
|
| +
|
| + // Returns the block number that is to be returned
|
| + // by the next Read() call.
|
| + virtual MultiBufferBlockId Tell() const = 0;
|
| +
|
| + // Returns true if one (or more) blocks are
|
| +    // available to read.
|
| + virtual bool Available() const = 0;
|
| +
|
| + // Returns the next block. Only valid if Available()
|
| + // returns true. Last block might be of a smaller size
|
| + // and after the last block we will get an end-of-stream
|
| + // DataBuffer.
|
| + virtual scoped_refptr<DataBuffer> Read() = 0;
|
| +
|
| + // |cb| is called every time Available() becomes true.
|
| + virtual void SetAvailableCallback(const base::Closure& cb) = 0;
|
| +
|
| + // Ask the data provider to stop giving us data.
|
| + // It's ok if the effect is not immediate.
|
| + virtual void SetDeferred(bool deferred) = 0;
|
| + };
|
| +
|
| + explicit MultiBuffer(int32_t block_size_shift);
|
| + virtual ~MultiBuffer();
|
| +
|
| + // Identifies a block in the cache.
|
| + // Block numbers can be calculated from byte positions as:
|
| + // block_num = byte_pos >> block_size_shift
|
| + typedef MultiBufferBlockId BlockId;
|
| + typedef std::map<BlockId, scoped_refptr<DataBuffer> > DataMap;
|
| +
|
| + // Registers a reader at the given position.
|
| + // If the cache does not already contain |pos|, it will activate
|
| + // or create data providers to make sure that the block becomes
|
| + // available soon. If |pos| is already in the cache, no action is
|
| + // taken, it simply lets the cache know that this reader is likely
|
| + // to read pos+1, pos+2.. soon.
|
| + //
|
| + // Registered readers will be notified when the available range
|
| + // at their position changes. The available range at |pos| is a range
|
| + // from A to B where: A <= |pos|, B >= |pos| and all blocks in [A..B)
|
| + // are present in the cache. When this changes, we will call
|
| + // NotifyAvailableRange() on the reader.
|
| + void AddReader(const BlockId& pos, Reader* reader);
|
| +
|
| + // Unregister a reader at block |pos|.
|
| + // Often followed by a call to AddReader(pos + 1, ...);
|
| + // Idempotent.
|
| + void RemoveReader(const BlockId& pos, Reader* reader);
|
| +
|
| + // Immediately remove writers at or before |pos| if nobody needs them.
|
| + // Note that we can't really do this in StopWaitFor(), because it's very
|
| + // likely that StopWaitFor() is immediately followed by a call to WaitFor().
|
| + // It is also a bad idea to wait for the writers to clean themselves up when
|
| +  // they try to provide unwanted data to the cache. Besides the obvious
|
| + // inefficiency, it will also cause the http_cache to bypass the disk/memory
|
| + // cache if we have multiple simultaneous requests going against the same
|
| + // url.
|
| + void CleanupWriters(const BlockId& pos);
|
| +
|
| + // Returns true if block |pos| is available in the cache.
|
| + bool Contains(const BlockId& pos) const;
|
| +
|
| + // Returns the next unavailable block at or after |pos|.
|
| + BlockId FindNextUnavailable(const BlockId& pos) const;
|
| +
|
| + // Change the pin count for a range of data blocks.
|
| + // Note that blocks do not have to be present in the
|
| + // cache to be pinned.
|
| + // Examples:
|
| + // Pin block 3, 4 & 5: PinRange(3, 6, 1);
|
| + // Unpin block 4 & 5: PinRange(4, 6, -1);
|
| + void PinRange(const BlockId& from, const BlockId& to, int32_t howmuch);
|
| +
|
| + // Calls PinRange for each range in |ranges|, convenience
|
| + // function for applying multiple changes to the pinned ranges.
|
| + void PinRanges(const RangeMap<BlockId, int32_t>& ranges);
|
| +
|
| + // Increment max cache size by |size| (counted in blocks).
|
| + void IncrementMaxSize(int32_t size);
|
| +
|
| + // Caller takes ownership of 'provider', cache will
|
| + // not call it anymore.
|
| + scoped_ptr<DataProvider> RemoveProvider(DataProvider* provider);
|
| +
|
| + // Add a writer to this cache. Cache takes ownership and
|
| + // may choose to destroy it.
|
| + void AddProvider(scoped_ptr<DataProvider> provider);
|
| +
|
| +  // Tell all readers working on |old_url_data| about a new URL.
|
| + // See Reader::UpdateUrlData for more info.
|
| + void UpdateUrlData(const MultiBufferUrlData& old_url_data,
|
| + const MultiBufferUrlData& new_url_data);
|
| +
|
| + // Accessors.
|
| + const DataMap& map() const { return data_; }
|
| + int32_t block_size_shift() const { return block_size_shift_; }
|
| +
|
| + protected:
|
| + // Create a new writer at |pos| and return it.
|
| +  // Users need to implement this method.
|
| + virtual DataProvider* CreateWriter(const BlockId& pos) = 0;
|
| +
|
| + private:
|
| + // For testing.
|
| + friend class TestMultiBuffer;
|
| +
|
| + enum ProviderState {
|
| + ProviderStateDead,
|
| + ProviderStateDefer,
|
| + ProviderStateLoad
|
| + };
|
| +
|
| + // Figure out what state a writer at |pos| should be in.
|
| + ProviderState SuggestProviderState(const BlockId& pos) const;
|
| +
|
| + // Returns true if a writer at |pos| is colliding with
|
| + // output of another writer.
|
| + bool ProviderCollision(const BlockId& pos) const;
|
| +
|
| + // Call NotifyAvailableRange(new_range) on all readers waiting
|
| + // for a block in |observer_range|
|
| + void NotifyAvailableRange(
|
| + const Range<MultiBufferBlockId>& observer_range,
|
| + const Range<MultiBufferBlockId>& new_range);
|
| +
|
| + // Callback which notifies us that a data provider has
|
| +  // some data for us. Also called when it might be appropriate
|
| + // for a provider in a deferred state to wake up.
|
| + void DataProviderEvent(DataProvider *provider);
|
| +
|
| + // Free elements from cache if needed and possible.
|
| + // Don't free more than |max_to_free| blocks.
|
| + // Virtual for testing purposes.
|
| + virtual void Prune(size_t max_to_free);
|
| +
|
| + // Max number of blocks.
|
| + size_t max_size_;
|
| +
|
| + // log2 of block size.
|
| + int32_t block_size_shift_;
|
| +
|
| + // Stores the actual data.
|
| + DataMap data_;
|
| +
|
| + // Keeps track of readers waiting for data.
|
| + std::map<MultiBufferBlockId, std::set<Reader*> > readers_;
|
| +
|
| + // Keeps track of writers by their position.
|
| + // The writers are owned by this class.
|
| + std::map<BlockId, DataProvider*> writer_index_;
|
| +
|
| + // The LRU should contain all blocks which are not pinned.
|
| + LRU<BlockId> lru_;
|
| +
|
| + // Keeps track of what blocks are pinned. If block p is pinned,
|
| + // then pinned_[p] > 0. Pinned blocks cannot be freed and should not
|
| + // be present in |lru_|.
|
| + RangeMap<BlockId, int32_t> pinned_;
|
| +
|
| + // present_[block] should be 1 for all blocks that are present
|
| + // and 0 for all blocks that are not. Used to quickly figure out
|
| + // ranges of available blocks without iterating.
|
| + RangeMap<BlockId, int32_t> present_;
|
| +};
|
| +
|
| +} // namespace media
|
| +
|
| +#endif // MEDIA_BLINK_MULTIBUFFER_H_
|
|
|