// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/blink/multibuffer.h"

#include "base/bind.h"

namespace media {
11 // Returns the block ID closest to (but less or equal than) |pos| from |index|. | |
12 template <class T> | |
13 static MultiBuffer::BlockId ClosestPreviousEntry( | |
14 const std::map<MultiBuffer::BlockId, T>& index, | |
15 MultiBuffer::BlockId pos) { | |
16 auto i = index.upper_bound(pos); | |
17 DCHECK(i == index.end() || i->first > pos); | |
18 if (i == index.begin()) { | |
19 return std::numeric_limits<MultiBufferBlockId>::min(); | |
20 } | |
21 --i; | |
22 DCHECK_LE(i->first, pos); | |
23 return i->first; | |
24 } | |
25 | |
26 // Returns the block ID closest to (but greter than or equal to) |pos| | |
27 // from |index|. | |
28 template <class T> | |
29 static MultiBuffer::BlockId ClosestNextEntry( | |
30 const std::map<MultiBuffer::BlockId, T>& index, | |
31 MultiBuffer::BlockId pos) { | |
32 auto i = index.lower_bound(pos); | |
33 if (i == index.end()) { | |
34 return std::numeric_limits<MultiBufferBlockId>::max(); | |
35 } | |
36 DCHECK_GE(i->first, pos); | |
37 return i->first; | |
38 } | |
39 | |
40 // | |
41 // MultiBuffer::GlobalLRU | |
42 // | |
43 MultiBuffer::GlobalLRU::GlobalLRU() : max_size_(0), data_size_(0) {} | |
44 | |
45 MultiBuffer::GlobalLRU::~GlobalLRU() { | |
46 // By the time we're freed, all blocks should have been removed, | |
47 // and our sums should be zero. | |
48 DCHECK(lru_.Empty()); | |
49 DCHECK_EQ(max_size_, 0); | |
50 DCHECK_EQ(data_size_, 0); | |
51 } | |
52 | |
53 void MultiBuffer::GlobalLRU::Use(MultiBuffer* multibuffer, | |
54 MultiBufferBlockId block_id) { | |
55 GlobalBlockId id(multibuffer, block_id); | |
56 lru_.Use(id); | |
57 } | |
58 | |
59 void MultiBuffer::GlobalLRU::Insert(MultiBuffer* multibuffer, | |
60 MultiBufferBlockId block_id) { | |
61 GlobalBlockId id(multibuffer, block_id); | |
62 lru_.Insert(id); | |
63 } | |
64 | |
65 void MultiBuffer::GlobalLRU::Remove(MultiBuffer* multibuffer, | |
66 MultiBufferBlockId block_id) { | |
67 GlobalBlockId id(multibuffer, block_id); | |
68 lru_.Remove(id); | |
69 } | |
70 | |
71 bool MultiBuffer::GlobalLRU::Contains(MultiBuffer* multibuffer, | |
72 MultiBufferBlockId block_id) { | |
73 GlobalBlockId id(multibuffer, block_id); | |
74 return lru_.Contains(id); | |
75 } | |
76 | |
77 void MultiBuffer::GlobalLRU::IncrementDataSize(int64_t blocks) { | |
78 data_size_ += blocks; | |
79 DCHECK_GE(data_size_, 0); | |
80 } | |
81 | |
82 void MultiBuffer::GlobalLRU::IncrementMaxSize(int64_t blocks) { | |
83 max_size_ += blocks; | |
84 DCHECK_GE(max_size_, 0); | |
85 } | |
86 | |
87 void MultiBuffer::GlobalLRU::Prune(int64_t max_to_free) { | |
88 // We group the blocks by multibuffer so that we can free as many blocks as | |
89 // possible in one call. This reduces the number of callbacks to clients | |
90 // when their available ranges change. | |
91 std::map<MultiBuffer*, std::vector<MultiBufferBlockId>> to_free; | |
92 int64_t freed = 0; | |
93 while (data_size_ - freed > max_size_ && !lru_.Empty() && | |
94 freed < max_to_free) { | |
95 GlobalBlockId block_id = lru_.Pop(); | |
96 to_free[block_id.first].push_back(block_id.second); | |
97 freed++; | |
98 } | |
99 for (const auto& to_free_pair : to_free) { | |
100 to_free_pair.first->ReleaseBlocks(to_free_pair.second); | |
101 } | |
102 } | |
103 | |
104 int64_t MultiBuffer::GlobalLRU::Size() const { | |
105 return lru_.Size(); | |
106 } | |
107 | |
108 // | |
109 // MultiBuffer | |
110 // | |
111 MultiBuffer::MultiBuffer(int32_t block_size_shift, | |
112 const scoped_refptr<GlobalLRU>& global_lru) | |
113 : max_size_(0), block_size_shift_(block_size_shift), lru_(global_lru) {} | |
114 | |
115 MultiBuffer::~MultiBuffer() { | |
116 // Delete all writers. | |
117 for (const auto& i : writer_index_) { | |
118 delete i.second; | |
119 } | |
120 // Remove all blocks from the LRU. | |
121 for (const auto& i : data_) { | |
122 lru_->Remove(this, i.first); | |
123 } | |
124 lru_->IncrementDataSize(-static_cast<int64_t>(data_.size())); | |
125 lru_->IncrementMaxSize(-max_size_); | |
126 } | |
127 | |
128 void MultiBuffer::AddReader(const BlockId& pos, Reader* reader) { | |
129 std::set<Reader*>& set_of_readers = readers_[pos]; | |
DaleCurtis
2015/11/11 23:48:17
Is & what you wanted here?
hubbe
2015/11/12 22:18:37
Converted to pointer.
| |
130 bool already_waited_for = !set_of_readers.empty(); | |
131 set_of_readers.insert(reader); | |
132 | |
133 if (already_waited_for || Contains(pos)) { | |
134 return; | |
135 } | |
136 | |
137 // We may need to create a new data provider to service this request. | |
138 // Look for an existing data provider first. | |
139 DataProvider* provider = NULL; | |
DaleCurtis
2015/11/11 23:48:17
nullptr
hubbe
2015/11/12 22:18:37
Done.
| |
140 BlockId closest_writer = ClosestPreviousEntry(writer_index_, pos); | |
141 | |
142 if (closest_writer > pos - kMaxWaitForWriterOffset) { | |
143 auto i = present_.find(pos); | |
144 BlockId closest_block; | |
145 if (i.value()) { | |
146 // Shouldn't happen, we already tested that Contains(pos) is true. | |
147 NOTREACHED(); | |
148 closest_block = pos; | |
149 } else if (i == present_.begin()) { | |
150 closest_block = -1; | |
151 } else { | |
152 closest_block = i.range_begin() - 1; | |
153 } | |
154 | |
155 // Make sure that there are no present blocks between the writer and | |
156 // the requested position, as that will cause the writer to quit. | |
157 if (closest_writer > closest_block) { | |
158 provider = writer_index_[closest_writer]; | |
159 DCHECK(provider); | |
160 } | |
161 } | |
162 if (!provider) { | |
163 DCHECK(writer_index_.find(pos) == writer_index_.end()); | |
164 provider = writer_index_[pos] = CreateWriter(pos); | |
165 provider->SetAvailableCallback(base::Bind(&MultiBuffer::DataProviderEvent, | |
166 base::Unretained(this), | |
167 base::Unretained(provider))); | |
DaleCurtis
2015/11/11 23:48:17
Second unretained necessary?
hubbe
2015/11/12 22:18:37
Apparently so, removed.
| |
168 } | |
169 provider->SetDeferred(false); | |
170 } | |
171 | |
172 void MultiBuffer::RemoveReader(const BlockId& pos, Reader* reader) { | |
173 auto i = readers_.find(pos); | |
174 if (i == readers_.end()) | |
175 return; | |
176 i->second.erase(reader); | |
177 if (i->second.empty()) { | |
178 readers_.erase(i); | |
179 } | |
180 } | |
181 | |
182 void MultiBuffer::CleanupWriters(const BlockId& pos) { | |
183 BlockId p2 = pos + kMaxWaitForReaderOffset; | |
184 BlockId closest_writer = ClosestPreviousEntry(writer_index_, p2); | |
185 while (closest_writer > pos - kMaxWaitForWriterOffset) { | |
186 DCHECK(writer_index_[closest_writer]); | |
187 DataProviderEvent(writer_index_[closest_writer]); | |
188 closest_writer = ClosestPreviousEntry(writer_index_, closest_writer - 1); | |
189 } | |
190 } | |
191 | |
192 bool MultiBuffer::Contains(const BlockId& pos) const { | |
193 DCHECK(present_[pos] == 0 || present_[pos] == 1) | |
194 << " pos = " << pos << " present_[pos] " << present_[pos]; | |
195 DCHECK_EQ(present_[pos], data_.find(pos) != data_.end() ? 1 : 0); | |
196 return !!present_[pos]; | |
197 } | |
198 | |
199 MultiBufferBlockId MultiBuffer::FindNextUnavailable(const BlockId& pos) const { | |
200 auto i = present_.find(pos); | |
201 if (i.value()) | |
202 return i.range_end(); | |
203 return pos; | |
204 } | |
205 | |
206 void MultiBuffer::NotifyAvailableRange( | |
207 const Range<MultiBufferBlockId>& observer_range, | |
208 const Range<MultiBufferBlockId>& new_range) { | |
209 std::set<Reader*> tmp; | |
210 for (auto i = readers_.lower_bound(observer_range.begin); | |
211 i != readers_.end() && i->first < observer_range.end; ++i) { | |
212 tmp.insert(i->second.begin(), i->second.end()); | |
213 } | |
214 for (Reader* reader : tmp) { | |
215 reader->NotifyAvailableRange(new_range); | |
216 } | |
217 } | |
218 | |
219 void MultiBuffer::ReleaseBlocks(const std::vector<MultiBufferBlockId> blocks) { | |
DaleCurtis
2015/11/11 23:48:17
Missing &
hubbe
2015/11/12 22:18:37
Done.
| |
220 RangeMap<BlockId, int32_t> freed; | |
221 for (MultiBufferBlockId to_free : blocks) { | |
222 DCHECK(data_[to_free]); | |
223 DCHECK_EQ(pinned_[to_free], 0); | |
224 DCHECK_EQ(present_[to_free], 1); | |
225 data_.erase(to_free); | |
226 freed.IncrementRange(to_free, to_free + 1, 1); | |
227 present_.IncrementRange(to_free, to_free + 1, -1); | |
228 } | |
229 lru_->IncrementDataSize(-static_cast<int64_t>(blocks.size())); | |
230 | |
231 for (const auto& freed_range : freed) { | |
232 if (freed_range.second) { | |
233 // Technically, there shouldn't be any observers in this range | |
234 // as all observers really should be pinning the range where it's | |
235 // actually observing. | |
236 NotifyAvailableRange( | |
237 freed_range.first, | |
238 // Empty range. | |
239 Range<BlockId>(freed_range.first.begin, freed_range.first.begin)); | |
240 | |
241 auto i = present_.find(freed_range.first.begin); | |
242 DCHECK_EQ(i.value(), 0); | |
243 DCHECK_LE(i.range_begin(), freed_range.first.begin); | |
244 DCHECK_LE(freed_range.first.end, i.range_end()); | |
245 | |
246 if (i.range_begin() == freed_range.first.begin) { | |
247 // Notify the previous range that it contains fewer blocks. | |
248 auto j = i; | |
DaleCurtis
2015/11/11 23:48:17
auto j = i - 1?
hubbe
2015/11/12 22:18:37
i/j are not RandomAccessIterators (They are Bidire
| |
249 --j; | |
250 DCHECK_EQ(j.value(), 1); | |
251 NotifyAvailableRange(j.range(), j.range()); | |
252 } | |
253 if (i.range_end() == freed_range.first.end) { | |
254 // Notify the following range that it contains fewer blocks. | |
255 auto j = i; | |
DaleCurtis
2015/11/11 23:48:17
auto j = i + 1
hubbe
2015/11/12 22:18:37
See above.
| |
256 ++j; | |
257 DCHECK_EQ(j.value(), 1); | |
258 NotifyAvailableRange(j.range(), j.range()); | |
259 } | |
260 } | |
261 } | |
262 } | |
263 | |
264 void MultiBuffer::AddProvider(scoped_ptr<DataProvider> provider) { | |
265 // If there is already a provider in the same location, we delete it. | |
266 DCHECK(!provider->Available()); | |
267 BlockId pos = provider->Tell(); | |
268 DataProvider** place = &writer_index_[pos]; | |
269 DCHECK_NE(*place, provider.get()); | |
270 if (*place) | |
271 delete *place; | |
272 *place = provider.release(); | |
273 } | |
274 | |
275 scoped_ptr<MultiBuffer::DataProvider> MultiBuffer::RemoveProvider( | |
276 DataProvider* provider) { | |
277 BlockId pos = provider->Tell(); | |
278 DCHECK_EQ(writer_index_[pos], provider); | |
279 writer_index_.erase(pos); | |
280 return scoped_ptr<DataProvider>(provider); | |
281 } | |
282 | |
283 MultiBuffer::ProviderState MultiBuffer::SuggestProviderState( | |
284 const BlockId& pos) const { | |
285 MultiBufferBlockId next_reader_pos = ClosestNextEntry(readers_, pos); | |
286 if (next_reader_pos != std::numeric_limits<MultiBufferBlockId>::max() && | |
287 (next_reader_pos - pos <= kMaxWaitForWriterOffset || !RangeSupported())) { | |
288 // Check if there is another writer between us and the next reader. | |
289 MultiBufferBlockId next_writer_pos = | |
290 ClosestNextEntry(writer_index_, pos + 1); | |
291 if (next_writer_pos > next_reader_pos) { | |
292 return ProviderStateLoad; | |
293 } | |
294 } | |
295 | |
296 MultiBufferBlockId previous_reader_pos = | |
297 ClosestPreviousEntry(readers_, pos - 1); | |
298 if (previous_reader_pos != std::numeric_limits<MultiBufferBlockId>::min() && | |
299 (pos - previous_reader_pos <= kMaxWaitForReaderOffset || | |
300 !RangeSupported())) { | |
301 MultiBufferBlockId previous_writer_pos = | |
302 ClosestPreviousEntry(writer_index_, pos - 1); | |
303 if (previous_writer_pos < previous_reader_pos) { | |
304 return ProviderStateDefer; | |
305 } | |
306 } | |
307 | |
308 return ProviderStateDead; | |
309 } | |
310 | |
311 bool MultiBuffer::ProviderCollision(const BlockId& id) const { | |
312 // If there is a writer at the same location, it is always a collision. | |
313 if (writer_index_.find(id) != writer_index_.end()) | |
314 return true; | |
315 | |
316 // Data already exists at providers current position, | |
317 // if the URL supports ranges, we can kill the data provider. | |
318 if (RangeSupported() && Contains(id)) | |
319 return true; | |
320 | |
321 return false; | |
322 } | |
323 | |
324 void MultiBuffer::Prune(size_t max_to_free) { | |
325 lru_->Prune(max_to_free); | |
326 } | |
327 | |
328 void MultiBuffer::DataProviderEvent(DataProvider* provider_tmp) { | |
329 scoped_ptr<DataProvider> provider(RemoveProvider(provider_tmp)); | |
330 BlockId start_pos = provider->Tell(); | |
331 BlockId pos = start_pos; | |
332 bool eof = false; | |
333 int64_t blocks_before = data_.size(); | |
334 | |
335 while (!ProviderCollision(pos) && !eof) { | |
336 if (!provider->Available()) { | |
337 AddProvider(provider.Pass()); | |
338 break; | |
339 } | |
340 DCHECK_GE(pos, 0); | |
341 scoped_refptr<DataBuffer> data = provider->Read(); | |
342 data_[pos] = data; | |
343 eof = data->end_of_stream(); | |
344 if (!pinned_[pos]) | |
345 lru_->Use(this, pos); | |
346 ++pos; | |
347 } | |
348 int64_t blocks_after = data_.size(); | |
349 int64_t blocks_added = blocks_after - blocks_before; | |
350 | |
351 if (pos > start_pos) { | |
352 present_.SetRange(start_pos, pos, 1); | |
353 Range<BlockId> expanded_range = present_.find(start_pos).range(); | |
354 NotifyAvailableRange(expanded_range, expanded_range); | |
355 | |
356 lru_->IncrementDataSize(blocks_added); | |
357 Prune(blocks_added * kMaxFreesPerAdd + 1); | |
358 } | |
359 | |
360 // Check that it's still there before we try to delete it. | |
361 // In case of EOF or a collision, we might not have called AddProvider above. | |
362 // Even if we did call AddProvider, calling NotifyAvailableRange can cause | |
363 // readers to seek or self-destruct and clean up any associated writers. | |
364 auto i = writer_index_.find(pos); | |
365 if (i != writer_index_.end() && i->second == provider_tmp) { | |
366 switch (SuggestProviderState(pos)) { | |
367 case ProviderStateLoad: | |
368 // Not sure we actually need to do this | |
369 provider_tmp->SetDeferred(false); | |
370 break; | |
371 case ProviderStateDefer: | |
372 provider_tmp->SetDeferred(true); | |
373 break; | |
374 case ProviderStateDead: | |
375 RemoveProvider(provider_tmp); | |
376 break; | |
377 } | |
378 } | |
379 } | |
380 | |
381 void MultiBuffer::MergeFrom(MultiBuffer* other) { | |
382 // Import data and update LRU. | |
383 for (const auto& data : other->data_) { | |
384 if (data_.insert(std::make_pair(data.first, data.second)).second) { | |
385 if (!pinned_[data.first]) { | |
386 lru_->Insert(this, data.first); | |
387 } | |
388 } | |
389 } | |
390 // Update present_ | |
391 for (const auto& r : other->present_) { | |
392 if (r.second) { | |
393 present_.SetRange(r.first.begin, r.first.end, 1); | |
394 } | |
395 } | |
396 // Notify existing readers. | |
397 auto last = present_.begin(); | |
398 for (const auto& r : other->present_) { | |
399 if (r.second) { | |
400 auto i = present_.find(r.first.begin); | |
401 if (i != last) { | |
402 NotifyAvailableRange(i.range(), i.range()); | |
403 last = i; | |
404 } | |
405 } | |
406 } | |
407 } | |
408 | |
// Adjusts the pin count of every block in [from, to) by |how_much|
// (positive to pin, negative to unpin), moving cached blocks in or out
// of the LRU when their range transitions between pinned and unpinned.
void MultiBuffer::PinRange(const BlockId& from,
                           const BlockId& to,
                           int32_t how_much) {
  DCHECK_NE(how_much, 0);
  DVLOG(3) << "PINRANGE [" << from << " - " << to << ") += " << how_much;
  pinned_.IncrementRange(from, to, how_much);
  Range<BlockId> modified_range(from, to);

  // Iterate over all the modified ranges and check if any of them have
  // transitioned in or out of the unlocked state. If so, we iterate over
  // all buffers in that range and add/remove them from the LRU as
  // appropriate. We iterate *backwards* through the ranges, with the idea
  // that data in a continuous range should be freed from the end first.

  // No cached blocks means nothing could possibly move in or out of
  // the LRU.
  if (data_.empty())
    return;

  auto range = pinned_.find(to - 1);
  while (1) {
    // After IncrementRange, a count of 0 means "just became unpinned"
    // and a count equal to |how_much| means "just became pinned" (it was
    // 0 before); any other value means the transition didn't cross zero.
    if (range.value() == 0 || range.value() == how_much) {
      bool pin = range.value() == how_much;
      Range<BlockId> transition_range = modified_range.Intersect(range.range());
      // Once the intersection is empty we have walked past |from|.
      if (transition_range.Empty())
        break;

      // For each range that has transitioned to/from a pinned state,
      // we iterate over the corresponding ranges in |present_| to find
      // the blocks that are actually in the multibuffer.
      for (auto present_block_range = present_.find(transition_range.end - 1);
           present_block_range != present_.begin(); --present_block_range) {
        // Skip gaps (ranges where no data is cached).
        if (!present_block_range.value())
          continue;
        Range<BlockId> present_transitioned_range =
            transition_range.Intersect(present_block_range.range());
        if (present_transitioned_range.Empty())
          break;
        // Walk the blocks back-to-front so a continuous range is freed
        // from the end first.
        for (BlockId block = present_transitioned_range.end - 1;
             block >= present_transitioned_range.begin; --block) {
          DCHECK_GE(block, 0);
          DCHECK(data_.find(block) != data_.end());
          if (pin) {
            // Pinned blocks must not be evicted; pull them out of the LRU.
            DCHECK(pinned_[block]);
            lru_->Remove(this, block);
          } else {
            // Unpinned blocks become eviction candidates again.
            DCHECK(!pinned_[block]);
            lru_->Insert(this, block);
          }
        }
      }
    }
    if (range == pinned_.begin())
      break;
    --range;
  }
}
464 | |
465 void MultiBuffer::PinRanges(const RangeMap<BlockId, int32_t>& ranges) { | |
466 for (const auto& r : ranges) { | |
467 if (r.second != 0) { | |
468 PinRange(r.first.begin, r.first.end, r.second); | |
469 } | |
470 } | |
471 } | |
472 | |
473 void MultiBuffer::IncrementMaxSize(int32_t size) { | |
474 max_size_ += size; | |
475 lru_->IncrementMaxSize(size); | |
476 DCHECK_GE(max_size_, 0); | |
477 // Pruning only happens when blocks are added. | |
478 } | |
479 | |
}  // namespace media