OLD | NEW |
(Empty) | |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "media/blink/multibuffer.h" |
| 6 |
| 7 #include "base/bind.h" |
| 8 |
namespace std {

// Debug printer for MultiBufferBlockId so that DCHECK/DVLOG macros can
// stream IDs, e.g. "{<url>, 42}" for an ID with url_data or "{null, 42}"
// for the null ID.
// NOTE(review): adding declarations to namespace std is technically
// undefined behavior per [namespace.std]; consider moving this overload
// into namespace media so it is found via ADL instead — confirm no caller
// relies on the std-qualified name.
ostream& operator<<(ostream& o, const media::MultiBufferBlockId& id) {
  if (id.url_data()) {
    return o << "{" << id.url_data()->url() << ", " << id.block_num() << "}";
  } else {
    return o << "{null, " << id.block_num() << "}";
  }
}

}  // namespace std
| 20 |
| 21 namespace media { |
| 22 |
| 23 MultiBufferBlockId::MultiBufferBlockId() : |
| 24 url_data_(nullptr), |
| 25 block_num_(0) { |
| 26 } |
| 27 MultiBufferBlockId::MultiBufferBlockId(MultiBufferUrlData url_data, |
| 28 MultiBufferBlockNum block_num) : |
| 29 url_data_(url_data), |
| 30 block_num_(block_num) { |
| 31 } |
| 32 |
| 33 MultiBufferBlockId::MultiBufferBlockId(const MultiBufferBlockId& block_id) : |
| 34 url_data_(block_id.url_data()), |
| 35 block_num_(block_id.block_num()) { |
| 36 } |
| 37 |
// Nothing to release manually; |url_data_| and |block_num_| clean
// themselves up.
MultiBufferBlockId::~MultiBufferBlockId() {}
| 39 |
| 40 // Returns the block ID closest to (but less or equal than) |pos| from |index|. |
| 41 template<class T> |
| 42 static MultiBuffer::BlockId ClosestPreviousEntry( |
| 43 const std::map<MultiBuffer::BlockId, T>& index, |
| 44 MultiBuffer::BlockId pos) { |
| 45 auto i = index.upper_bound(pos); |
| 46 DCHECK(i == index.end() || i->first > pos); |
| 47 if (i == index.begin()) { |
| 48 return MultiBufferBlockId(); |
| 49 } |
| 50 --i; |
| 51 if (!i->first.SameUrl(pos)) { |
| 52 return MultiBufferBlockId(); |
| 53 } |
| 54 DCHECK_LE(i->first, pos); |
| 55 return i->first; |
| 56 } |
| 57 |
// Returns the block ID closest to (but greater than or equal to) |pos|
// from |index|, or the null MultiBufferBlockId() when no entry at or
// after |pos| belongs to the same URL.
template<class T>
static MultiBuffer::BlockId ClosestNextEntry(
    const std::map<MultiBuffer::BlockId, T>& index,
    MultiBuffer::BlockId pos) {
  auto i = index.lower_bound(pos);
  if (i == index.end()) {
    return MultiBufferBlockId();
  }
  // An entry belonging to a different URL does not count as "next".
  if (!i->first.SameUrl(pos)) {
    return MultiBufferBlockId();
  }
  DCHECK_GE(i->first, pos);
  return i->first;
}
| 74 |
| 75 // |
| 76 // MultiBuffer |
| 77 // |
| 78 MultiBuffer::MultiBuffer(int32_t block_size_shift) : |
| 79 max_size_(0), |
| 80 block_size_shift_(block_size_shift) { |
| 81 } |
| 82 |
| 83 MultiBuffer::~MultiBuffer() { |
| 84 // Delete all writers. |
| 85 for (const auto& i : writer_index_) { |
| 86 delete i.second; |
| 87 } |
| 88 } |
| 89 |
| 90 void MultiBuffer::AddReader(const BlockId& pos, Reader* reader) { |
| 91 std::set<Reader*> &set_of_readers = readers_[pos]; |
| 92 bool already_waited_for = !set_of_readers.empty(); |
| 93 set_of_readers.insert(reader); |
| 94 |
| 95 if (already_waited_for || Contains(pos)) { |
| 96 return; |
| 97 } |
| 98 |
| 99 // We may need to create a new data provider to service this request. |
| 100 // Look for an existing data provider first. |
| 101 DataProvider* provider = NULL; |
| 102 BlockId closest_writer = ClosestPreviousEntry(writer_index_, pos); |
| 103 |
| 104 if (pos - closest_writer < kMaxWaitForWriterOffset) { |
| 105 BlockId closest_block = ClosestPreviousEntry(data_, pos); |
| 106 if (pos - closest_block > pos - closest_writer) { |
| 107 provider = writer_index_[closest_writer]; |
| 108 DCHECK(provider); |
| 109 } |
| 110 } |
| 111 if (!provider) { |
| 112 provider = writer_index_[pos] = CreateWriter(pos); |
| 113 provider->SetAvailableCallback( |
| 114 base::Bind(&MultiBuffer::DataProviderEvent, |
| 115 base::Unretained(this), |
| 116 base::Unretained(provider))); |
| 117 } |
| 118 provider->SetDeferred(false); |
| 119 } |
| 120 |
| 121 void MultiBuffer::RemoveReader(const BlockId& pos, Reader* reader) { |
| 122 auto i = readers_.find(pos); |
| 123 if (i == readers_.end()) |
| 124 return; |
| 125 i->second.erase(reader); |
| 126 if (i->second.empty()) { |
| 127 readers_.erase(i); |
| 128 } |
| 129 } |
| 130 |
| 131 void MultiBuffer::CleanupWriters(const BlockId& pos) { |
| 132 BlockId closest_writer = ClosestPreviousEntry(writer_index_, pos); |
| 133 if (closest_writer == BlockId()) |
| 134 return; |
| 135 if (pos - closest_writer > kMaxWaitForWriterOffset) |
| 136 return; |
| 137 DCHECK(writer_index_[closest_writer]); |
| 138 DataProviderEvent(writer_index_[closest_writer]); |
| 139 } |
| 140 |
// Returns true iff the block at |pos| is currently cached. |present_| is
// a 0/1 range map acting as the fast index; the DCHECKs verify it stays
// consistent with the backing |data_| map.
bool MultiBuffer::Contains(const BlockId& pos) const {
  DCHECK(present_[pos] == 0 || present_[pos] == 1)
      << " pos = " << pos
      << " present_[pos] " << present_[pos];
  DCHECK_EQ(present_[pos], data_.find(pos) != data_.end() ? 1 : 0);
  return !!present_[pos];
}
| 148 |
| 149 MultiBufferBlockId MultiBuffer::FindNextUnavailable(const BlockId& pos) const { |
| 150 auto i = present_.find(pos); |
| 151 if (i.value()) { |
| 152 return i.range_end(); |
| 153 } else { |
| 154 return pos; |
| 155 } |
| 156 } |
| 157 |
| 158 void MultiBuffer::NotifyAvailableRange( |
| 159 const Range<MultiBufferBlockId>& observer_range, |
| 160 const Range<MultiBufferBlockId>& new_range) { |
| 161 std::set<Reader*> tmp; |
| 162 for (auto i = readers_.lower_bound(observer_range.begin); |
| 163 i != readers_.end() && i->first < observer_range.end; |
| 164 ++i) { |
| 165 tmp.insert(i->second.begin(), i->second.end()); |
| 166 } |
| 167 for (Reader* reader: tmp) { |
| 168 reader->NotifyAvailableRange(new_range); |
| 169 } |
| 170 } |
| 171 |
// Evicts unpinned cached blocks (LRU order) while the cache exceeds
// |max_size_|, releasing at most |max_to_free| blocks, then tells readers
// which previously-available ranges shrank.
void MultiBuffer::Prune(size_t max_to_free) {
  // Use a rangemap to merge all consecutive frees into
  // ranges, then notify observers of changes to those ranges.
  RangeMap<BlockId, int32_t> freed;
  // NOTE(review): |data_.size()| (size_t) is compared against int32_t
  // |max_size_|; IncrementMaxSize() DCHECKs max_size_ >= 0, which keeps
  // the signed/unsigned promotion benign — confirm that invariant holds
  // in release builds too.
  while (data_.size() > max_size_ && !lru_.Empty() && max_to_free > 0) {
    BlockId to_free = lru_.Pop();
    DCHECK(data_[to_free]);
    DCHECK_EQ(pinned_[to_free], 0);
    DCHECK_EQ(present_[to_free], 1);
    data_.erase(to_free);
    freed.IncrementRange(to_free, to_free + 1, 1);
    present_.IncrementRange(to_free, to_free + 1, -1);
    max_to_free--;
  }

  for (auto freed_iter = freed.first_range();
       freed_iter != freed.last_range();
       ++freed_iter) {
    if (freed_iter.value()) {
      // Technically, there shouldn't be any observers in this range
      // as all observers really should be pinning the range where it's
      // actually observing.
      NotifyAvailableRange(
          freed_iter.range(),
          // Empty range.
          Range<BlockId>(freed_iter.range_begin(),
                         freed_iter.range_begin()));

      // The freed range must now lie inside a zero-valued (absent) range
      // of |present_|.
      auto i = present_.find(freed_iter.range_begin());
      DCHECK_EQ(i.value(), 0);
      DCHECK_LE(i.range_begin(), freed_iter.range_begin());
      DCHECK_LE(freed_iter.range_end(), i.range_end());

      if (i.range_begin() == freed_iter.range_begin()) {
        // Notify the previous range that it contains fewer blocks.
        // NOTE(review): if the hole is the very first range in |present_|,
        // decrementing here steps before the first range — presumably the
        // RangeMap iterator tolerates this or the case cannot occur;
        // confirm against RangeMap's contract.
        auto j = i;
        --j;
        DCHECK_EQ(j.value(), 1);
        NotifyAvailableRange(j.range(), j.range());
      }
      if (i.range_end() == freed_iter.range_end()) {
        // Notify the following range that it contains fewer blocks.
        auto j = i;
        ++j;
        DCHECK_EQ(j.value(), 1);
        NotifyAvailableRange(j.range(), j.range());
      }
    }
  }
}
| 222 |
| 223 void MultiBuffer::AddProvider(scoped_ptr<DataProvider> provider) { |
| 224 // If there is already a provider in the same location, we delete it. |
| 225 DCHECK(!provider->Available()); |
| 226 BlockId pos = provider->Tell(); |
| 227 DataProvider** place = &writer_index_[pos]; |
| 228 DCHECK_NE(*place, provider.get()); |
| 229 if (*place) delete *place; |
| 230 *place = provider.release(); |
| 231 } |
| 232 |
| 233 scoped_ptr<MultiBuffer::DataProvider> MultiBuffer::RemoveProvider( |
| 234 DataProvider *provider) { |
| 235 BlockId pos = provider->Tell(); |
| 236 DCHECK_EQ(writer_index_[pos], provider); |
| 237 writer_index_.erase(pos); |
| 238 return scoped_ptr<DataProvider>(provider); |
| 239 } |
| 240 |
// Picks a fate for the writer at |pos|:
//  - ProviderStateLoad: keep loading — a reader waits at or ahead of |pos|
//    (within kMaxWaitForWriterOffset, or the URL does not support range
//    requests) and no other writer sits between us and that reader.
//  - ProviderStateDefer: pause but stay alive — a reader trails within
//    kMaxWaitForReaderOffset behind |pos| with no closer writer serving it.
//  - ProviderStateDead: no nearby reader needs this writer.
// NOTE(review): pos.url_data() is dereferenced unconditionally here;
// presumably writers never carry a null url_data — confirm.
MultiBuffer::ProviderState MultiBuffer::SuggestProviderState(
    const BlockId& pos) const {
  // Is there a reader at or ahead of us that we should keep loading for?
  MultiBufferBlockId next_reader_pos = ClosestNextEntry(readers_, pos);
  if (next_reader_pos != MultiBufferBlockId() &&
      (next_reader_pos - pos <= kMaxWaitForWriterOffset ||
       !pos.url_data()->range_supported())) {
    // Check if there is another writer between us and the next reader.
    MultiBufferBlockId next_writer_pos = ClosestNextEntry(
        writer_index_, pos + 1);
    if (next_writer_pos == MultiBufferBlockId() ||
        next_writer_pos > next_reader_pos) {
      return ProviderStateLoad;
    }
  }

  // Is there a reader just behind us that may soon catch up?
  MultiBufferBlockId previous_reader_pos = ClosestPreviousEntry(
      readers_, pos - 1);
  if (previous_reader_pos != MultiBufferBlockId() &&
      (pos - previous_reader_pos <= kMaxWaitForReaderOffset ||
       !pos.url_data()->range_supported())) {
    MultiBufferBlockId previous_writer_pos =
        ClosestPreviousEntry(writer_index_, pos - 1);
    // NOTE(review): when previous_writer_pos is the null BlockId, this
    // comparison relies on the null ID ordering before any real reader
    // position — confirm BlockId's operator< semantics.
    if (previous_writer_pos < previous_reader_pos) {
      return ProviderStateDefer;
    }
  }

  return ProviderStateDead;
}
| 270 |
| 271 bool MultiBuffer::ProviderCollision(const BlockId& id) const { |
| 272 // If there is a writer at the same location, it is always a collision. |
| 273 if (writer_index_.find(id) != writer_index_.end()) |
| 274 return true; |
| 275 |
| 276 // Data already exists at providers current position, |
| 277 // if the URL supports ranges, we can kill the data provider. |
| 278 if (id.url_data()->range_supported() && Contains(id)) |
| 279 return true; |
| 280 |
| 281 return false; |
| 282 } |
| 283 |
// Drives a writer: drains whatever blocks it has available into |data_|,
// announces the newly-available range to readers, prunes the cache, and
// finally decides whether the writer should keep loading, defer, or die.
void MultiBuffer::DataProviderEvent(DataProvider *provider_tmp) {
  // Take ownership. Unless the loop below hands it back via AddProvider(),
  // |provider| deletes the object when this function returns.
  scoped_ptr<DataProvider> provider(RemoveProvider(provider_tmp));
  BlockId start_pos = provider->Tell();
  BlockId pos = start_pos;
  bool eof = false;

  while (!ProviderCollision(pos) && !eof) {
    if (!provider->Available()) {
      // Out of data for now: re-register the writer at its current
      // position and wait for its next availability callback.
      AddProvider(provider.Pass());
      break;
    }
    DCHECK_GE(pos.block_num(), 0);
    data_[pos] = provider->Read();
    eof = data_[pos]->end_of_stream();
    // Unpinned blocks become eviction candidates immediately.
    if (!pinned_[pos])
      lru_.Use(pos);
    ++pos;
  }

  if (pos > start_pos) {
    // Mark [start_pos, pos) present and notify readers about the whole
    // contiguous range it merged into.
    present_.SetRange(start_pos, pos, 1);
    Range<BlockId> expanded_range = present_.find(start_pos).range();
    NotifyAvailableRange(expanded_range, expanded_range);

    // Adding blocks may push us over budget; free a bounded number.
    Prune((pos - start_pos) * kMaxFreesPerAdd + 1);
  }

  // Check that it's still there before we try to delete it.
  // (|provider_tmp| is only guaranteed alive if AddProvider() above put it
  // back into |writer_index_|; otherwise |provider| still owns it.)
  auto i = writer_index_.find(pos);
  if (i != writer_index_.end() && i->second == provider_tmp) {
    switch (SuggestProviderState(pos)) {
      case ProviderStateLoad:
        // Not sure we actually need to do this
        provider_tmp->SetDeferred(false);
        break;
      case ProviderStateDefer:
        provider_tmp->SetDeferred(true);
        break;
      case ProviderStateDead:
        // The returned scoped_ptr is discarded, deleting the provider.
        RemoveProvider(provider_tmp);
        break;
    }
  }
}
| 328 |
| 329 void MultiBuffer::UpdateUrlData(const MultiBufferUrlData& old_url_data, |
| 330 const MultiBufferUrlData& new_url_data) { |
| 331 MultiBufferBlockId pos(old_url_data, 0); |
| 332 auto i = readers_.lower_bound(pos); |
| 333 while (i != readers_.end() && pos.SameUrl(i->first)) { |
| 334 std::set<Reader*> tmp; |
| 335 tmp.swap(i->second); |
| 336 auto j = i; |
| 337 ++j; |
| 338 readers_.erase(i); |
| 339 i = j; |
| 340 for (Reader* reader: tmp) { |
| 341 reader->UpdateUrlData(old_url_data, new_url_data); |
| 342 } |
| 343 } |
| 344 } |
| 345 |
| 346 |
// Adjusts the pin count of every block in [from, to) by |howmuch| and
// moves cached blocks in or out of the LRU eviction list accordingly:
// blocks whose pin count becomes nonzero leave the LRU (must not be
// evicted), blocks dropping to zero re-enter it.
void MultiBuffer::PinRange(
    const BlockId& from, const BlockId& to, int32_t howmuch) {
  DCHECK_NE(howmuch, 0);
  DVLOG(3) << "PINRANGE [" << from << " - " << to << ") += " << howmuch;
  pinned_.IncrementRange(from, to, howmuch);

  // Iterate over all the modified ranges and check if
  // any of them have transitioned in or out of the
  // unlocked state. If so, we iterate over all buffers
  // in that range and add/remove them from the LRU as
  // appropriate. We iterate *backwards* through the
  // ranges, with the idea that data in a continuous range
  // should be freed from the end first.

  if (data_.empty())
    return;

  auto range = pinned_.find(to - 1);
  while (1) {
    // A range now reading 0 just became fully unpinned; one reading
    // exactly |howmuch| was at 0 before and just became pinned. Any other
    // value means the range was already (un)pinned and needs no LRU work.
    if (range.value() == 0 || range.value() == howmuch) {
      bool pin = range.value() == howmuch;
      // Clip to the interval actually touched by this call; an empty
      // intersection means we have walked past |from| and can stop.
      BlockId begin = std::max(range.range_begin(), from);
      BlockId end = std::min(range.range_end(), to);
      if (begin >= end)
        break;
      // Walk the cached blocks of [begin, end) from the back.
      DataMap::iterator k = data_.lower_bound(end);
      while (k != data_.begin()) {
        --k;
        if (k->first < begin)
          break;
        DCHECK(k->second);
        DCHECK_GE(k->first.block_num(), 0);
        if (pin) {
          DCHECK(pinned_[k->first]);
          lru_.Remove(k->first);
        } else {
          DCHECK(!pinned_[k->first]);
          lru_.Insert(k->first);
        }
      }
    }
    if (range == pinned_.first_range()) break;
    --range;
  }
}
| 392 |
| 393 void MultiBuffer::PinRanges(const RangeMap<BlockId, int32_t>& ranges) { |
| 394 // We know that "last_range" has a zero value, so it is |
| 395 // ok to skip over it. |
| 396 for (auto i = ranges.first_range(); i != ranges.last_range(); ++i) { |
| 397 if (i.value() != 0) { |
| 398 PinRange(i.range_begin(), i.range_end(), i.value()); |
| 399 } |
| 400 } |
| 401 } |
| 402 |
// Grows (or, with a negative |size|, shrinks) the cache budget that
// Prune() compares |data_| against. The DCHECK guards against the budget
// ever going negative.
void MultiBuffer::IncrementMaxSize(int32_t size) {
  max_size_ += size;
  DCHECK_GE(max_size_, 0);
  // Pruning only happens when blocks are added.
}
| 408 |
| 409 } // namespace media |
OLD | NEW |