OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #ifndef MEDIA_BLINK_MULTIBUFFER_H_ | |
6 #define MEDIA_BLINK_MULTIBUFFER_H_ | |
7 | |
#include <stdint.h>

#include <limits>
#include <map>
#include <set>
#include <vector>

#include "base/callback.h"
#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "media/base/data_buffer.h"
#include "media/blink/lru.h"
#include "media/blink/media_blink_export.h"
#include "media/blink/rangemap.h"
22 | |
namespace media {

// Identifies one block inside a MultiBuffer. Block ids are derived
// from byte positions as: block_num = byte_pos >> block_size_shift.
typedef int32_t MultiBufferBlockId;
class MultiBuffer;

// Globally unique block identifier: the owning MultiBuffer plus the
// block id within it. Used as the key type of the shared GlobalLRU
// and as the hashed key below.
typedef std::pair<MultiBuffer*, MultiBufferBlockId> MultiBufferGlobalBlockId;

}  // namespace media
30 | |
31 namespace BASE_HASH_NAMESPACE { | |
32 | |
33 template <> | |
34 struct hash<media::MultiBufferGlobalBlockId> { | |
35 std::size_t operator()(const media::MultiBufferGlobalBlockId& key) const { | |
36 return base::HashPair(reinterpret_cast<uint64>(key.first), key.second); | |
DaleCurtis
2015/10/30 00:24:24
This doesn't seem right, forcing it to a 64-bit al
hubbe
2015/11/02 22:49:53
Guess that's what I get from copying code from som
| |
37 } | |
38 }; | |
39 | |
40 } // namespace BASE_HASH_NAMESPACE | |
41 | |
42 namespace media { | |
43 | |
// Freeing a lot of blocks can be expensive; to keep things
// flowing smoothly we free at most |kMaxFreesPerAdd| blocks
// when a new block is added to the cache.
const int kMaxFreesPerAdd = 10;

// There is a simple logic for creating, destroying and deferring
// data providers. Every data provider has a look-ahead region and
// a look-behind region. If there are readers in the look-ahead
// region, we keep reading. If not, but there are readers in the
// look-behind region, we defer. If there are no readers in either
// region, we destroy the data provider.
//
// When new readers are added, new data providers are created if
// the new reader doesn't fall into the look-ahead region of
// an existing data provider.

// Size of the look-ahead region, counted in blocks.
const int kMaxWaitForWriterOffset = 5;

// Size of the look-behind region, counted in blocks.
const int kMaxWaitForReaderOffset = 50;

class MultiBuffer;
67 | |
68 // MultiBuffers are multi-reader multi-writer cache/buffers with | |
69 // prefetching and pinning. Data is stored internally in ref-counted | |
70 // blocks of identical size. |block_size_shift| is log2 of the block | |
71 // size. | |
72 // | |
73 // Users should inherit this class and implement CreateWriter(). | |
74 // TODO(hubbe): Make the multibuffer respond to memory pressure. | |
75 class MEDIA_EXPORT MultiBuffer { | |
76 public: | |
77 // Interface for clients wishing to read data out of this cache. | |
78 // Note: It might look tempting to replace this with a callback, | |
79 // but we keep and compare pointers to Readers internally. | |
80 class Reader { | |
81 public: | |
82 Reader() {} | |
83 virtual ~Reader() {} | |
84 // Notifies the reader that the range of available blocks has changed. | |
85 // The reader must call MultiBuffer::Observe() to activate this callback. | |
86 virtual void NotifyAvailableRange( | |
87 const Range<MultiBufferBlockId>& range) = 0; | |
88 | |
89 private: | |
90 DISALLOW_COPY_AND_ASSIGN(Reader); | |
91 }; | |
92 | |
93 // DataProvider is the interface that MultiBuffer | |
94 // uses to get data into the cache. | |
95 class DataProvider { | |
96 public: | |
97 virtual ~DataProvider() {} | |
98 | |
99 // Returns the block number that is to be returned | |
100 // by the next Read() call. | |
101 virtual MultiBufferBlockId Tell() const = 0; | |
102 | |
103 // Returns true if one (or more) blocks are | |
104 // availble to read. | |
105 virtual bool Available() const = 0; | |
106 | |
107 // Returns the next block. Only valid if Available() | |
108 // returns true. Last block might be of a smaller size | |
109 // and after the last block we will get an end-of-stream | |
110 // DataBuffer. | |
111 virtual scoped_refptr<DataBuffer> Read() = 0; | |
112 | |
113 // |cb| is called every time Available() becomes true. | |
114 virtual void SetAvailableCallback(const base::Closure& cb) = 0; | |
115 | |
116 // Ask the data provider to stop giving us data. | |
117 // It's ok if the effect is not immediate. | |
118 virtual void SetDeferred(bool deferred) = 0; | |
119 }; | |
120 | |
121 // Multibuffers use a global shared LRU to free memory. | |
122 // This effectively means that recently used multibuffers can | |
123 // borrow memory from less recently used ones. | |
124 class MEDIA_EXPORT GlobalLRU : public base::RefCounted<GlobalLRU> { | |
125 public: | |
126 typedef MultiBufferGlobalBlockId GlobalBlockId; | |
127 GlobalLRU(); | |
128 | |
129 // Free elements from cache if needed and possible. | |
130 // Don't free more than |max_to_free| blocks. | |
131 // Virtual for testing purposes. | |
132 void Prune(size_t max_to_free); | |
133 | |
134 void IncrementDataSize(int64_t blocks); | |
135 void IncrementMaxSize(int64_t blocks); | |
136 | |
137 // LRU operations. | |
138 void Use(MultiBuffer* multibuffer, MultiBufferBlockId id); | |
139 void Remove(MultiBuffer* multibuffer, MultiBufferBlockId id); | |
140 void Insert(MultiBuffer* multibuffer, MultiBufferBlockId id); | |
141 bool Contains(MultiBuffer* multibuffer, MultiBufferBlockId id); | |
142 size_t Size() const; | |
143 | |
144 private: | |
145 friend class base::RefCounted<GlobalLRU>; | |
146 ~GlobalLRU(); | |
147 | |
148 // Max number of blocks. | |
149 int64_t max_size_; | |
150 | |
151 // Sum of all multibuffer::data_.size(). | |
152 int64_t data_size_; | |
153 | |
154 // The LRU should contain all blocks which are not pinned from | |
155 // all multibuffers. | |
156 LRU<GlobalBlockId> lru_; | |
157 }; | |
158 | |
159 MultiBuffer(int32_t block_size_shift, | |
160 const scoped_refptr<GlobalLRU>& global_lru); | |
161 virtual ~MultiBuffer(); | |
162 | |
163 // Identifies a block in the cache. | |
164 // Block numbers can be calculated from byte positions as: | |
165 // block_num = byte_pos >> block_size_shift | |
166 typedef MultiBufferBlockId BlockId; | |
167 typedef base::hash_map<BlockId, scoped_refptr<DataBuffer>> DataMap; | |
168 | |
169 // Registers a reader at the given position. | |
170 // If the cache does not already contain |pos|, it will activate | |
171 // or create data providers to make sure that the block becomes | |
172 // available soon. If |pos| is already in the cache, no action is | |
173 // taken, it simply lets the cache know that this reader is likely | |
174 // to read pos+1, pos+2.. soon. | |
175 // | |
176 // Registered readers will be notified when the available range | |
177 // at their position changes. The available range at |pos| is a range | |
178 // from A to B where: A <= |pos|, B >= |pos| and all blocks in [A..B) | |
179 // are present in the cache. When this changes, we will call | |
180 // NotifyAvailableRange() on the reader. | |
181 void AddReader(const BlockId& pos, Reader* reader); | |
182 | |
183 // Unregister a reader at block |pos|. | |
184 // Often followed by a call to AddReader(pos + 1, ...); | |
185 // Idempotent. | |
186 void RemoveReader(const BlockId& pos, Reader* reader); | |
187 | |
188 // Immediately remove writers at or before |pos| if nobody needs them. | |
189 // Note that we can't really do this in StopWaitFor(), because it's very | |
190 // likely that StopWaitFor() is immediately followed by a call to WaitFor(). | |
191 // It is also a bad idea to wait for the writers to clean themselves up when | |
192 // they try to provide unwanted data to the cache. Besides the obvoius | |
193 // inefficiency, it will also cause the http_cache to bypass the disk/memory | |
194 // cache if we have multiple simultaneous requests going against the same | |
195 // url. | |
196 void CleanupWriters(const BlockId& pos); | |
197 | |
198 // Returns true if block |pos| is available in the cache. | |
199 bool Contains(const BlockId& pos) const; | |
200 | |
201 // Returns the next unavailable block at or after |pos|. | |
202 BlockId FindNextUnavailable(const BlockId& pos) const; | |
203 | |
204 // Change the pin count for a range of data blocks. | |
205 // Note that blocks do not have to be present in the | |
206 // cache to be pinned. | |
207 // Examples: | |
208 // Pin block 3, 4 & 5: PinRange(3, 6, 1); | |
209 // Unpin block 4 & 5: PinRange(4, 6, -1); | |
210 void PinRange(const BlockId& from, const BlockId& to, int32_t how_much); | |
211 | |
212 // Calls PinRange for each range in |ranges|, convenience | |
213 // function for applying multiple changes to the pinned ranges. | |
214 void PinRanges(const RangeMap<BlockId, int32_t>& ranges); | |
215 | |
216 // Increment max cache size by |size| (counted in blocks). | |
217 void IncrementMaxSize(int32_t size); | |
218 | |
219 // Caller takes ownership of 'provider', cache will | |
220 // not call it anymore. | |
221 scoped_ptr<DataProvider> RemoveProvider(DataProvider* provider); | |
222 | |
223 // Add a writer to this cache. Cache takes ownership and | |
224 // may choose to destroy it. | |
225 void AddProvider(scoped_ptr<DataProvider> provider); | |
226 | |
227 // Transfer all data from |other| to this. | |
228 void MergeFrom(MultiBuffer* other); | |
229 | |
230 // Accessors. | |
231 const DataMap& map() const { return data_; } | |
232 int32_t block_size_shift() const { return block_size_shift_; } | |
233 | |
234 protected: | |
235 // Create a new writer at |pos| and return it. | |
236 // Users needs to implemement this method. | |
237 virtual DataProvider* CreateWriter(const BlockId& pos) = 0; | |
238 | |
239 virtual bool RangeSupported() const = 0; | |
240 | |
241 private: | |
242 // For testing. | |
243 friend class TestMultiBuffer; | |
244 | |
245 enum ProviderState { | |
246 ProviderStateDead, | |
247 ProviderStateDefer, | |
248 ProviderStateLoad | |
249 }; | |
250 | |
251 // Can be overriden for testing. | |
252 virtual void Prune(size_t max_to_free); | |
253 | |
254 // Remove the given blocks from the multibuffer, called from | |
255 // GlobalLRU::Prune(). | |
256 void ReleaseBlocks(const std::vector<MultiBufferBlockId> blocks); | |
257 | |
258 // Figure out what state a writer at |pos| should be in. | |
259 ProviderState SuggestProviderState(const BlockId& pos) const; | |
260 | |
261 // Returns true if a writer at |pos| is colliding with | |
262 // output of another writer. | |
263 bool ProviderCollision(const BlockId& pos) const; | |
264 | |
265 // Call NotifyAvailableRange(new_range) on all readers waiting | |
266 // for a block in |observer_range| | |
267 void NotifyAvailableRange(const Range<MultiBufferBlockId>& observer_range, | |
268 const Range<MultiBufferBlockId>& new_range); | |
269 | |
270 // Callback which notifies us that a data provider has | |
271 // some data for us. Also called when it might be apprperiate | |
272 // for a provider in a deferred state to wake up. | |
273 void DataProviderEvent(DataProvider* provider); | |
274 | |
275 // Max number of blocks. | |
276 int64_t max_size_; | |
277 | |
278 // log2 of block size. | |
279 int32_t block_size_shift_; | |
280 | |
281 // Stores the actual data. | |
282 DataMap data_; | |
283 | |
284 // Keeps track of readers waiting for data. | |
285 std::map<MultiBufferBlockId, std::set<Reader*>> readers_; | |
286 | |
287 // Keeps track of writers by their position. | |
288 // The writers are owned by this class. | |
289 std::map<BlockId, DataProvider*> writer_index_; | |
290 | |
291 // Gloabally shared LRU, decides which block to free next. | |
292 scoped_refptr<GlobalLRU> lru_; | |
293 | |
294 // Keeps track of what blocks are pinned. If block p is pinned, | |
295 // then pinned_[p] > 0. Pinned blocks cannot be freed and should not | |
296 // be present in |lru_|. | |
297 RangeMap<BlockId, int32_t> pinned_; | |
298 | |
299 // present_[block] should be 1 for all blocks that are present | |
300 // and 0 for all blocks that are not. Used to quickly figure out | |
301 // ranges of available/unavailable blocks without iterating. | |
302 RangeMap<BlockId, int32_t> present_; | |
303 }; | |
304 | |
305 } // namespace media | |
306 | |
307 #endif // MEDIA_BLINK_MULTIBUFFER_H_ | |
OLD | NEW |