Chromium Code Reviews

Side by Side Diff: media/blink/multibuffer.h

Issue 1165903002: Multi reader/writer cache/buffer (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: merged Created 5 years, 1 month ago
1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef MEDIA_BLINK_MULTIBUFFER_H_
6 #define MEDIA_BLINK_MULTIBUFFER_H_
7
8 #include <stdint.h>
9
10 #include <limits>
11 #include <map>
12 #include <set>
13 #include <vector>
14
15 #include "base/callback.h"
16 #include "base/containers/hash_tables.h"
17 #include "base/macros.h"
18 #include "base/memory/ref_counted.h"
19 #include "media/base/data_buffer.h"
20 #include "media/blink/lru.h"
21 #include "media/blink/media_blink_export.h"
22 #include "media/blink/rangemap.h"
23
24 namespace media {
25
26 typedef int32_t MultiBufferBlockId;
27 class MultiBuffer;
28 typedef std::pair<MultiBuffer*, MultiBufferBlockId> MultiBufferGlobalBlockId;
29
30 } // namespace media
31
32 namespace BASE_HASH_NAMESPACE {
33
34 template <>
35 struct hash<media::MultiBufferGlobalBlockId> {
36 std::size_t operator()(const media::MultiBufferGlobalBlockId& key) const {
37 // It would be nice if we could use intptr_t instead of int64_t here, but
38 // on some platforms, int64_t is declared as "long" which doesn't match
39 // any of the HashPair() functions. This leads to a compile error since
40 // the compiler can't decide which HashPair() function to call.
41 return base::HashPair(reinterpret_cast<int64_t>(key.first), key.second);
DaleCurtis 2015/11/11 23:48:17 intptr_t ?
hubbe 2015/11/12 22:18:37 Did you read the comment?
DaleCurtis 2015/11/12 23:22:03 No, sorry! Does uintptr_t work?
hubbe 2015/11/12 23:34:40 No, same problem.
DaleCurtis 2015/11/12 23:43:14 #ifdef ARCH_CPU_64_BITS ?
hubbe 2015/11/13 01:24:25 Should work, done.
42 }
43 };
44
45 } // namespace BASE_HASH_NAMESPACE
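
As a reference for the thread above, a minimal sketch of the ARCH_CPU_64_BITS approach suggested by DaleCurtis (ARCH_CPU_64_BITS comes from build/build_config.h; the exact code that landed in a later patch set may differ):

  // Sketch only; not the code under review.
  #if defined(ARCH_CPU_64_BITS)
    return base::HashPair(reinterpret_cast<int64_t>(key.first), key.second);
  #else
    return base::HashPair(reinterpret_cast<int32_t>(key.first), key.second);
  #endif

The idea is to cast the pointer to an integer type whose width matches the platform, so the call resolves to an existing HashPair() overload on both 32-bit and 64-bit builds.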
46
47 namespace media {
48
49 // Freeing a lot of blocks can be expensive; to keep things
50 // flowing smoothly we only free a maximum of |kMaxFreesPerAdd|
51 // blocks when a new block is added to the cache.
52 const int kMaxFreesPerAdd = 10;
53
54 // There is a simple logic for creating, destroying and deferring
55 // data providers. Every data provider has a look-ahead region and
56 // a look-behind region. If there are readers in the look-ahead
57 // region, we keep reading. If not, but there are readers in the
58 // look-behind region, we defer. If there are no readers in either
59 // region, we destroy the data provider.
60
61 // When new readers are added, new data providers are created if
62 // the new reader doesn't fall into the look-ahead region of
63 // an existing data provider.
64
65 // This is the size of the look-ahead region.
66 const int kMaxWaitForWriterOffset = 5;
67
68 // This is the size of the look-behind region.
69 const int kMaxWaitForReaderOffset = 50;
70
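
A sketch of the decision rule described above, using the two constants and a made-up reader-position set (the real logic is MultiBuffer::SuggestProviderState(), declared further down and implemented in multibuffer.cc):

  // Sketch only: mirrors the comment above, not the actual implementation.
  enum class SketchState { kDestroy, kDefer, kLoad };

  SketchState SuggestState(int32_t writer_pos,
                           const std::set<int32_t>& reader_positions) {
    // Returns true if any reader sits in [from, to).
    auto reader_in = [&](int32_t from, int32_t to) {
      auto it = reader_positions.lower_bound(from);
      return it != reader_positions.end() && *it < to;
    };
    if (reader_in(writer_pos, writer_pos + kMaxWaitForWriterOffset))
      return SketchState::kLoad;     // Readers in the look-ahead region.
    if (reader_in(writer_pos - kMaxWaitForReaderOffset, writer_pos))
      return SketchState::kDefer;    // Readers only in the look-behind region.
    return SketchState::kDestroy;    // No readers near this writer.
  }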
71 class MultiBuffer;
72
73 // MultiBuffers are multi-reader multi-writer cache/buffers with
74 // prefetching and pinning. Data is stored internally in ref-counted
75 // blocks of identical size. |block_size_shift| is log2 of the block
76 // size.
77 //
78 // Users should inherit this class and implement CreateWriter().
79 // TODO(hubbe): Make the multibuffer respond to memory pressure.
80 class MEDIA_BLINK_EXPORT MultiBuffer {
81 public:
82 // Interface for clients wishing to read data out of this cache.
83 // Note: It might look tempting to replace this with a callback,
84 // but we keep and compare pointers to Readers internally.
85 class Reader {
86 public:
87 Reader() {}
88 virtual ~Reader() {}
89 // Notifies the reader that the range of available blocks has changed.
90 // The reader must call MultiBuffer::Observe() to activate this callback.
91 virtual void NotifyAvailableRange(
92 const Range<MultiBufferBlockId>& range) = 0;
93
94 private:
95 DISALLOW_COPY_AND_ASSIGN(Reader);
96 };
97
98 // DataProvider is the interface that MultiBuffer
99 // uses to get data into the cache.
100 class DataProvider {
101 public:
102 virtual ~DataProvider() {}
103
104 // Returns the block number that is to be returned
105 // by the next Read() call.
106 virtual MultiBufferBlockId Tell() const = 0;
107
108 // Returns true if one (or more) blocks are
109 // available to read.
110 virtual bool Available() const = 0;
111
112 // Returns the next block. Only valid if Available()
113 // returns true. Last block might be of a smaller size
114 // and after the last block we will get an end-of-stream
115 // DataBuffer.
116 virtual scoped_refptr<DataBuffer> Read() = 0;
117
118 // |cb| is called every time Available() becomes true.
119 virtual void SetAvailableCallback(const base::Closure& cb) = 0;
120
121 // Ask the data provider to stop giving us data.
122 // It's ok if the effect is not immediate.
123 virtual void SetDeferred(bool deferred) = 0;
124 };
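
As an illustration of this contract, a minimal hypothetical provider that serves pre-loaded blocks from memory (a real provider would be backed by a URL loader and would invoke the available-callback asynchronously):

  // Sketch only: an in-memory DataProvider used to illustrate the interface.
  class FakeProvider : public MultiBuffer::DataProvider {
   public:
    FakeProvider(MultiBufferBlockId start,
                 std::vector<scoped_refptr<DataBuffer>> blocks)
        : pos_(start), blocks_(std::move(blocks)) {}

    MultiBufferBlockId Tell() const override { return pos_; }
    bool Available() const override { return next_ < blocks_.size(); }
    scoped_refptr<DataBuffer> Read() override {
      ++pos_;
      return blocks_[next_++];  // The last entry would be an end-of-stream
                                // DataBuffer, per the comment above.
    }
    void SetAvailableCallback(const base::Closure& cb) override { cb_ = cb; }
    void SetDeferred(bool deferred) override { deferred_ = deferred; }

   private:
    MultiBufferBlockId pos_;
    size_t next_ = 0;
    bool deferred_ = false;
    std::vector<scoped_refptr<DataBuffer>> blocks_;
    base::Closure cb_;
  };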
125
126 // Multibuffers use a global shared LRU to free memory.
127 // This effectively means that recently used multibuffers can
128 // borrow memory from less recently used ones.
129 class MEDIA_BLINK_EXPORT GlobalLRU : public base::RefCounted<GlobalLRU> {
130 public:
131 typedef MultiBufferGlobalBlockId GlobalBlockId;
132 GlobalLRU();
133
134 // Free elements from cache if needed and possible.
135 // Don't free more than |max_to_free| blocks.
136 // Virtual for testing purposes.
137 void Prune(int64_t max_to_free);
138
139 void IncrementDataSize(int64_t blocks);
140 void IncrementMaxSize(int64_t blocks);
141
142 // LRU operations.
143 void Use(MultiBuffer* multibuffer, MultiBufferBlockId id);
144 void Remove(MultiBuffer* multibuffer, MultiBufferBlockId id);
145 void Insert(MultiBuffer* multibuffer, MultiBufferBlockId id);
146 bool Contains(MultiBuffer* multibuffer, MultiBufferBlockId id);
147 int64_t Size() const;
148
149 private:
150 friend class base::RefCounted<GlobalLRU>;
151 ~GlobalLRU();
152
153 // Max number of blocks.
154 int64_t max_size_;
155
156 // Sum of all multibuffer::data_.size().
157 int64_t data_size_;
158
159 // The LRU should contain all blocks, from all multibuffers,
160 // that are not pinned.
161 LRU<GlobalBlockId> lru_;
162 };
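
A sketch of how the shared LRU is meant to be wired up, assuming a hypothetical MultiBuffer subclass (see the subclass sketch at the end of this listing); both caches account against, and are evicted through, the same GlobalLRU:

  // Sketch only: two caches sharing one eviction budget.
  void SharedLruSketch() {
    scoped_refptr<MultiBuffer::GlobalLRU> lru(new MultiBuffer::GlobalLRU());
    lru->IncrementMaxSize(1000);   // Global budget of 1000 blocks.
    MyMultiBuffer audio(15, lru);  // Hypothetical subclass; 32 KiB blocks.
    MyMultiBuffer video(15, lru);
    // A busy |video| can now reclaim blocks that |audio| cached but no
    // longer pins. Eviction happens via Prune(), which frees at most
    // |max_to_free| blocks per call (kMaxFreesPerAdd when a block is added).
    lru->Prune(kMaxFreesPerAdd);
  }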
163
164 MultiBuffer(int32_t block_size_shift,
165 const scoped_refptr<GlobalLRU>& global_lru);
166 virtual ~MultiBuffer();
167
168 // Identifies a block in the cache.
169 // Block numbers can be calculated from byte positions as:
170 // block_num = byte_pos >> block_size_shift
171 typedef MultiBufferBlockId BlockId;
172 typedef base::hash_map<BlockId, scoped_refptr<DataBuffer>> DataMap;
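
For example, assuming block_size_shift == 15 (32 KiB blocks; the real value is whatever the creator passes to the constructor), byte position 100000 falls into block 3:

  // Sketch: mapping byte positions to BlockIds when block_size_shift == 15.
  MultiBuffer::BlockId BlockForByte(int64_t byte_pos) {
    return static_cast<MultiBuffer::BlockId>(byte_pos >> 15);
  }
  // BlockForByte(0) == 0, BlockForByte(32767) == 0, BlockForByte(100000) == 3.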
173
174 // Registers a reader at the given position.
175 // If the cache does not already contain |pos|, it will activate
176 // or create data providers to make sure that the block becomes
177 // available soon. If |pos| is already in the cache, no action is
178 // taken; it simply lets the cache know that this reader is likely
179 // to read pos+1, pos+2, ... soon.
180 //
181 // Registered readers will be notified when the available range
182 // at their position changes. The available range at |pos| is a range
183 // from A to B where: A <= |pos|, B >= |pos| and all blocks in [A..B)
184 // are present in the cache. When this changes, we will call
185 // NotifyAvailableRange() on the reader.
186 void AddReader(const BlockId& pos, Reader* reader);
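
A minimal sketch of a Reader registered at |pos| (illustrative names only; a real reader would check whether the new range now covers the block it is waiting for):

  // Sketch only: a Reader that records that availability around |pos_| changed.
  class WaitingReader : public MultiBuffer::Reader {
   public:
    explicit WaitingReader(MultiBufferBlockId pos) : pos_(pos) {}

    void NotifyAvailableRange(
        const Range<MultiBufferBlockId>& range) override {
      // The available range around |pos_| changed; inspect |range| here.
      notified_ = true;
    }

    MultiBufferBlockId pos() const { return pos_; }
    bool notified() const { return notified_; }

   private:
    MultiBufferBlockId pos_;
    bool notified_ = false;
  };

  // Usage sketch:
  //   WaitingReader reader(pos);
  //   multibuffer->AddReader(pos, &reader);
  //   ...
  //   multibuffer->RemoveReader(pos, &reader);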
187
188 // Unregister a reader at block |pos|.
189 // Often followed by a call to AddReader(pos + 1, ...);
190 // Idempotent.
191 void RemoveReader(const BlockId& pos, Reader* reader);
192
193 // Immediately remove writers at or before |pos| if nobody needs them.
194 // Note that we can't really do this in StopWaitFor(), because it's very
195 // likely that StopWaitFor() is immediately followed by a call to WaitFor().
196 // It is also a bad idea to wait for the writers to clean themselves up when
197 // they try to provide unwanted data to the cache. Besides the obvious
198 // inefficiency, it will also cause the http_cache to bypass the disk/memory
199 // cache if we have multiple simultaneous requests going against the same
200 // url.
201 void CleanupWriters(const BlockId& pos);
202
203 // Returns true if block |pos| is available in the cache.
204 bool Contains(const BlockId& pos) const;
205
206 // Returns the next unavailable block at or after |pos|.
207 BlockId FindNextUnavailable(const BlockId& pos) const;
208
209 // Change the pin count for a range of data blocks.
210 // Note that blocks do not have to be present in the
211 // cache to be pinned.
212 // Examples:
213 // Pin block 3, 4 & 5: PinRange(3, 6, 1);
214 // Unpin block 4 & 5: PinRange(4, 6, -1);
215 void PinRange(const BlockId& from, const BlockId& to, int32_t how_much);
216
217 // Calls PinRange for each range in |ranges|, convenience
218 // function for applying multiple changes to the pinned ranges.
219 void PinRanges(const RangeMap<BlockId, int32_t>& ranges);
220
221 // Increment max cache size by |size| (counted in blocks).
222 void IncrementMaxSize(int32_t size);
223
224 // Caller takes ownership of |provider|; the cache will
225 // not call it anymore.
226 scoped_ptr<DataProvider> RemoveProvider(DataProvider* provider);
227
228 // Add a writer to this cache. Cache takes ownership and
229 // may choose to destroy it.
230 void AddProvider(scoped_ptr<DataProvider> provider);
231
232 // Transfer all data from |other| to this.
233 void MergeFrom(MultiBuffer* other);
234
235 // Accessors.
236 const DataMap& map() const { return data_; }
237 int32_t block_size_shift() const { return block_size_shift_; }
238
239 protected:
240 // Create a new writer at |pos| and return it.
241 // Users need to implement this method.
242 virtual DataProvider* CreateWriter(const BlockId& pos) = 0;
243
244 virtual bool RangeSupported() const = 0;
245
246 private:
247 // For testing.
248 friend class TestMultiBuffer;
249
250 enum ProviderState {
251 ProviderStateDead,
252 ProviderStateDefer,
253 ProviderStateLoad
254 };
255
256 // Can be overridden for testing.
257 virtual void Prune(size_t max_to_free);
258
259 // Remove the given blocks from the multibuffer, called from
260 // GlobalLRU::Prune().
261 void ReleaseBlocks(const std::vector<MultiBufferBlockId> blocks);
262
263 // Figure out what state a writer at |pos| should be in.
264 ProviderState SuggestProviderState(const BlockId& pos) const;
265
266 // Returns true if a writer at |pos| is colliding with
267 // output of another writer.
268 bool ProviderCollision(const BlockId& pos) const;
269
270 // Call NotifyAvailableRange(new_range) on all readers waiting
271 // for a block in |observer_range|.
272 void NotifyAvailableRange(const Range<MultiBufferBlockId>& observer_range,
273 const Range<MultiBufferBlockId>& new_range);
274
275 // Callback which notifies us that a data provider has
276 // some data for us. Also called when it might be appropriate
277 // for a provider in a deferred state to wake up.
278 void DataProviderEvent(DataProvider* provider);
279
280 // Max number of blocks.
281 int64_t max_size_;
282
283 // log2 of block size.
284 int32_t block_size_shift_;
285
286 // Stores the actual data.
287 DataMap data_;
288
289 // Keeps track of readers waiting for data.
290 std::map<MultiBufferBlockId, std::set<Reader*>> readers_;
291
292 // Keeps track of writers by their position.
293 // The writers are owned by this class.
294 std::map<BlockId, DataProvider*> writer_index_;
DaleCurtis 2015/11/11 23:48:17 ScopedPtrMap?
hubbe 2015/11/12 22:18:37 I'd need to fix ScopedPtrMap to expose upper_bound
DaleCurtis 2015/11/12 23:22:54 Hmm, does it not work with std::lower_bound, std::
hubbe 2015/11/12 23:34:40 It might, but the performance would suffer since s
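
For context on the thread above, a sketch of the kind of ordered lookup that keeps |writer_index_| a std::map: finding the writer closest to, but not past, a position is an upper_bound() on the sorted keys (assumed helper name; the real code lives in multibuffer.cc):

  // Sketch only: locate the writer at or before |pos|, if any.
  MultiBuffer::DataProvider* FindWriterAtOrBefore(
      const std::map<MultiBufferBlockId, MultiBuffer::DataProvider*>& writers,
      MultiBufferBlockId pos) {
    auto i = writers.upper_bound(pos);  // First writer strictly after |pos|.
    if (i == writers.begin())
      return nullptr;                   // All writers are past |pos|.
    --i;
    return i->second;
  }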
295
296 // Globally shared LRU; decides which block to free next.
297 scoped_refptr<GlobalLRU> lru_;
298
299 // Keeps track of what blocks are pinned. If block p is pinned,
300 // then pinned_[p] > 0. Pinned blocks cannot be freed and should not
301 // be present in |lru_|.
302 RangeMap<BlockId, int32_t> pinned_;
303
304 // present_[block] should be 1 for all blocks that are present
305 // and 0 for all blocks that are not. Used to quickly figure out
306 // ranges of available/unavailable blocks without iterating.
307 RangeMap<BlockId, int32_t> present_;
308 };
309
310 } // namespace media
311
312 #endif // MEDIA_BLINK_MULTIBUFFER_H_
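
Tying the pieces together, a sketch of the minimal subclass the header asks for (hypothetical names; a real user would return a provider that fetches data for block |pos|, e.g. over the network):

  // Sketch only: the smallest possible MultiBuffer user.
  class MyMultiBuffer : public MultiBuffer {
   public:
    MyMultiBuffer(int32_t block_size_shift,
                  const scoped_refptr<GlobalLRU>& lru)
        : MultiBuffer(block_size_shift, lru) {}

   protected:
    // Called when the cache decides it needs a new writer at |pos|.
    DataProvider* CreateWriter(const BlockId& pos) override {
      return new FakeProvider(pos, {});  // See the DataProvider sketch above.
    }
    // Presumably whether the source supports starting reads at arbitrary
    // positions (e.g. HTTP range requests).
    bool RangeSupported() const override { return true; }
  };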