// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MEDIA_BLINK_MULTIBUFFER_H_
#define MEDIA_BLINK_MULTIBUFFER_H_

#include <stdint.h>

#include <limits>
#include <map>
#include <set>
#include <vector>

#include "base/callback.h"
#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "media/base/data_buffer.h"
#include "media/blink/interval_map.h"
#include "media/blink/lru.h"
#include "media/blink/media_blink_export.h"

namespace media {

typedef int32_t MultiBufferBlockId;
class MultiBuffer;
typedef std::pair<MultiBuffer*, MultiBufferBlockId> MultiBufferGlobalBlockId;

}  // namespace media

namespace BASE_HASH_NAMESPACE {

template <>
struct hash<media::MultiBufferGlobalBlockId> {
  std::size_t operator()(const media::MultiBufferGlobalBlockId& key) const {
    // It would be nice if we could use intptr_t instead of int64_t here, but
    // on some platforms, int64_t is declared as "long" which doesn't match
    // any of the HashPair() functions. This leads to a compile error since
    // the compiler can't decide which HashPair() function to call.
#if defined(ARCH_CPU_64_BITS)
    return base::HashPair(reinterpret_cast<int64_t>(key.first), key.second);
#else
    return base::HashPair(reinterpret_cast<int32_t>(key.first), key.second);
#endif
  }
};

}  // namespace BASE_HASH_NAMESPACE

namespace media {

// Freeing a lot of blocks can be expensive, so to keep things flowing
// smoothly we free at most |kMaxFreesPerAdd| blocks when a new block
// is added to the cache.
const int kMaxFreesPerAdd = 10;

// The logic for creating, destroying and deferring data providers
// is simple. Every data provider has a look-ahead region and
// a look-behind region. If there are readers in the look-ahead
// region, we keep reading. If not, but there are readers in the
// look-behind region, we defer. If there are no readers in either
// region, we destroy the data provider.

// When new readers are added, new data providers are created if
// the new reader doesn't fall into the look-ahead region of
// an existing data provider.

// This is the size of the look-ahead region.
const int kMaxWaitForWriterOffset = 5;

// This is the size of the look-behind region.
const int kMaxWaitForReaderOffset = 50;
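
// An approximate sketch of that logic for a writer whose next block is
// |pos| (exact boundaries are an assumption here; the authoritative
// rules live in SuggestProviderState()):
//
//   reader waiting in [pos, pos + kMaxWaitForWriterOffset)  -> keep loading
//   else reader in [pos - kMaxWaitForReaderOffset, pos)     -> defer
//   else                                                    -> destroy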

class MultiBuffer;

// MultiBuffers are multi-reader multi-writer cache/buffers with
// prefetching and pinning. Data is stored internally in ref-counted
// blocks of identical size. |block_size_shift| is log2 of the block
// size.
//
// Users should inherit this class and implement CreateWriter().
// TODO(hubbe): Make the multibuffer respond to memory pressure.
class MEDIA_BLINK_EXPORT MultiBuffer {
 public:
  // Interface for clients wishing to read data out of this cache.
  // Note: It might seem tempting to replace this with a callback,
  // but we keep and compare pointers to Readers internally.
  class Reader {
   public:
    Reader() {}
    virtual ~Reader() {}
    // Notifies the reader that the range of available blocks has changed.
    // The reader must call MultiBuffer::Observe() to activate this callback.
    virtual void NotifyAvailableRange(
        const Interval<MultiBufferBlockId>& range) = 0;

   private:
    DISALLOW_COPY_AND_ASSIGN(Reader);
  };

  // DataProvider is the interface that MultiBuffer
  // uses to get data into the cache.
  class DataProvider {
   public:
    virtual ~DataProvider() {}

    // Returns the block number that is to be returned
    // by the next Read() call.
    virtual MultiBufferBlockId Tell() const = 0;

    // Returns true if one or more blocks are
    // available to read.
    virtual bool Available() const = 0;

    // Returns the next block. Only valid if Available()
    // returns true. The last block might be smaller than
    // the rest, and after the last block we will get an
    // end-of-stream DataBuffer.
    virtual scoped_refptr<DataBuffer> Read() = 0;

    // |cb| is called every time Available() becomes true.
    virtual void SetAvailableCallback(const base::Closure& cb) = 0;

    // Asks the data provider to stop giving us data.
    // It's ok if the effect is not immediate.
    virtual void SetDeferred(bool deferred) = 0;
  };
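
  // Illustrative consumption pattern (a sketch only; the real draining
  // logic lives in DataProviderEvent() below):
  //
  //   while (provider->Available()) {
  //     MultiBufferBlockId pos = provider->Tell();
  //     scoped_refptr<DataBuffer> block = provider->Read();
  //     // Insert |block| into the cache at |pos|...
  //   }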

  // Multibuffers use a global shared LRU to free memory.
  // This effectively means that recently used multibuffers can
  // borrow memory from less recently used ones.
  class MEDIA_BLINK_EXPORT GlobalLRU : public base::RefCounted<GlobalLRU> {
   public:
    typedef MultiBufferGlobalBlockId GlobalBlockId;
    GlobalLRU();

    // Frees elements from the cache if needed and possible.
    // Doesn't free more than |max_to_free| blocks.
    void Prune(int64_t max_to_free);

    void IncrementDataSize(int64_t blocks);
    void IncrementMaxSize(int64_t blocks);

    // LRU operations.
    void Use(MultiBuffer* multibuffer, MultiBufferBlockId id);
    void Remove(MultiBuffer* multibuffer, MultiBufferBlockId id);
    void Insert(MultiBuffer* multibuffer, MultiBufferBlockId id);
    bool Contains(MultiBuffer* multibuffer, MultiBufferBlockId id);
    int64_t Size() const;

   private:
    friend class base::RefCounted<GlobalLRU>;
    ~GlobalLRU();

    // Max number of blocks.
    int64_t max_size_;

    // Sum of data_.size() over all multibuffers.
    int64_t data_size_;

    // The LRU should contain all blocks which are not pinned, from
    // all multibuffers.
    LRU<GlobalBlockId> lru_;
  };
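
  // Illustrative effect of the shared LRU (numbers assumed): if two
  // multibuffers each call IncrementMaxSize(100), the combined budget
  // is 200 blocks. A busy multibuffer may then grow to 150 blocks while
  // an idle one shrinks, because pruning evicts the globally least
  // recently used, unpinned blocks.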

  MultiBuffer(int32_t block_size_shift,
              const scoped_refptr<GlobalLRU>& global_lru);
  virtual ~MultiBuffer();

  // Identifies a block in the cache.
  // Block numbers can be calculated from byte positions as:
  //   block_num = byte_pos >> block_size_shift
  typedef MultiBufferBlockId BlockId;
  typedef base::hash_map<BlockId, scoped_refptr<DataBuffer>> DataMap;
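
  // For example (numbers assumed): with |block_size_shift| = 15, each
  // block is 32 KiB, and byte position 100000 falls in block
  // 100000 >> 15 = 3.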

  // Registers a reader at the given position.
  // If the cache does not already contain |pos|, it will activate
  // or create data providers to make sure that the block becomes
  // available soon. If |pos| is already in the cache, no action is
  // taken; this simply lets the cache know that this reader is likely
  // to read pos+1, pos+2, ... soon.
  //
  // Registered readers will be notified when the available range
  // at their position changes. The available range at |pos| is a range
  // from A to B where A <= |pos|, B >= |pos| and all blocks in [A..B)
  // are present in the cache. When this changes, we will call
  // NotifyAvailableRange() on the reader.
  void AddReader(const BlockId& pos, Reader* reader);
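
  // Illustrative usage (|multibuffer| is a MultiBuffer* and MyReader is
  // a hypothetical Reader subclass, neither defined in this file):
  //
  //   MyReader reader;
  //   multibuffer->AddReader(5, &reader);  // Interested in block 5.
  //   ...                                  // NotifyAvailableRange() fires.
  //   multibuffer->RemoveReader(5, &reader);
  //   multibuffer->AddReader(6, &reader);  // Move on to block 6.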

  // Unregisters a reader at block |pos|.
  // Often followed by a call to AddReader(pos + 1, ...).
  // Idempotent.
  void RemoveReader(const BlockId& pos, Reader* reader);

  // Immediately removes writers at or before |pos| if nobody needs them.
  // Note that we can't really do this in StopWaitFor(), because it's very
  // likely that StopWaitFor() is immediately followed by a call to WaitFor().
  // It is also a bad idea to wait for the writers to clean themselves up when
  // they try to provide unwanted data to the cache. Besides the obvious
  // inefficiency, it will also cause the http_cache to bypass the disk/memory
  // cache if we have multiple simultaneous requests going against the same
  // URL.
  void CleanupWriters(const BlockId& pos);

  // Returns true if block |pos| is available in the cache.
  bool Contains(const BlockId& pos) const;

  // Returns the next unavailable block at or after |pos|.
  BlockId FindNextUnavailable(const BlockId& pos) const;

  // Changes the pin count for a range of data blocks.
  // Note that blocks do not have to be present in the
  // cache to be pinned.
  // Examples:
  //   Pin blocks 3, 4 & 5: PinRange(3, 6, 1);
  //   Unpin blocks 4 & 5:  PinRange(4, 6, -1);
  void PinRange(const BlockId& from, const BlockId& to, int32_t how_much);

  // Calls PinRange() for each range in |ranges|; a convenience
  // function for applying multiple changes to the pinned ranges.
  void PinRanges(const IntervalMap<BlockId, int32_t>& ranges);
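
  // Illustrative usage (assumes IntervalMap's IncrementInterval() API):
  //
  //   IntervalMap<BlockId, int32_t> changes;
  //   changes.IncrementInterval(3, 6, 1);     // Pin blocks 3, 4 & 5.
  //   changes.IncrementInterval(10, 12, -1);  // Unpin blocks 10 & 11.
  //   multibuffer->PinRanges(changes);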

  // Increments the max cache size by |size| (counted in blocks).
  void IncrementMaxSize(int32_t size);

  // Caller takes ownership of |provider|; the cache will
  // not call it anymore.
  scoped_ptr<DataProvider> RemoveProvider(DataProvider* provider);

  // Adds a writer to this cache. The cache takes ownership and
  // may choose to destroy it.
  void AddProvider(scoped_ptr<DataProvider> provider);

  // Transfers all data from |other| to this.
  void MergeFrom(MultiBuffer* other);

  // Accessors.
  const DataMap& map() const { return data_; }
  int32_t block_size_shift() const { return block_size_shift_; }

 protected:
  // Creates a new writer at |pos| and returns it.
  // Users need to implement this method.
  virtual DataProvider* CreateWriter(const BlockId& pos) = 0;
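
  // Illustrative sketch of a subclass (UrlMultiBuffer, UrlDataProvider
  // and url_ are hypothetical, not part of this file):
  //
  //   class UrlMultiBuffer : public MultiBuffer {
  //    protected:
  //     DataProvider* CreateWriter(const BlockId& pos) override {
  //       // Byte offset for the writer: block id times block size.
  //       int64_t byte_pos = static_cast<int64_t>(pos) << block_size_shift();
  //       return new UrlDataProvider(url_, byte_pos);
  //     }
  //   };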

  virtual bool RangeSupported() const = 0;

 private:
  // For testing.
  friend class TestMultiBuffer;

  enum ProviderState {
    ProviderStateDead,
    ProviderStateDefer,
    ProviderStateLoad
  };

  // Can be overridden for testing.
  virtual void Prune(size_t max_to_free);

  // Removes the given blocks from the multibuffer; called from
  // GlobalLRU::Prune().
  void ReleaseBlocks(const std::vector<MultiBufferBlockId>& blocks);

  // Figures out what state a writer at |pos| should be in.
  ProviderState SuggestProviderState(const BlockId& pos) const;

  // Returns true if a writer at |pos| is colliding with
  // the output of another writer.
  bool ProviderCollision(const BlockId& pos) const;

  // Calls NotifyAvailableRange(new_range) on all readers waiting
  // for a block in |observer_range|.
  void NotifyAvailableRange(const Interval<MultiBufferBlockId>& observer_range,
                            const Interval<MultiBufferBlockId>& new_range);

  // Callback which notifies us that a data provider has
  // some data for us. Also called when it might be appropriate
  // for a provider in a deferred state to wake up.
  void DataProviderEvent(DataProvider* provider);

  // Max number of blocks.
  int64_t max_size_;

  // log2 of the block size.
  int32_t block_size_shift_;

  // Stores the actual data.
  DataMap data_;

  // Keeps track of readers waiting for data.
  std::map<MultiBufferBlockId, std::set<Reader*>> readers_;

  // Keeps track of writers by their position.
  // The writers are owned by this class.
  // TODO(hubbe): Use ScopedPtrMap here. (Must add upper/lower_bound first.)
  std::map<BlockId, DataProvider*> writer_index_;

  // Globally shared LRU; decides which block to free next.
  scoped_refptr<GlobalLRU> lru_;

  // Keeps track of which blocks are pinned. If block p is pinned,
  // then pinned_[p] > 0. Pinned blocks cannot be freed and should not
  // be present in |lru_|.
  IntervalMap<BlockId, int32_t> pinned_;

  // present_[block] should be 1 for all blocks that are present
  // and 0 for all blocks that are not. Used to quickly figure out
  // ranges of available/unavailable blocks without iterating.
  IntervalMap<BlockId, int32_t> present_;
};

}  // namespace media

#endif  // MEDIA_BLINK_MULTIBUFFER_H_