// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/message_loop/message_loop.h"
#include "media/blink/multibuffer_reader.h"
#include "net/base/net_errors.h"

namespace media {

MultiBufferReader::MultiBufferReader(
    MultiBuffer* multibuffer,
    int64_t start,
    int64_t end,
    const base::Callback<void(int64_t, int64_t)>& progress_callback)
    : multibuffer_(multibuffer),
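      // An |end| of -1 means the length is unknown; use a very large upper
      // bound (2^30 blocks) until the real end of stream is discovered.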
      end_(end == -1LL ? (1LL << (multibuffer->block_size_shift() + 30))
                       : end),
      preload_high_(0),
      preload_low_(0),
      max_buffer_forward_(0),
      max_buffer_backward_(0),
      pos_(start),
      preload_pos_(-1),
      loading_(true),
      current_wait_size_(0),
      progress_callback_(progress_callback),
      weak_factory_(this) {
  DCHECK_GE(start, 0);
  DCHECK_GE(end_, 0);
}

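// Unregisters this reader from the multibuffer, removes its contribution to
// the multibuffer's maximum size, and releases the pins it holds on the
// range around the current read position.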
MultiBufferReader::~MultiBufferReader() {
  multibuffer_->RemoveReader(preload_pos_, this);
  multibuffer_->IncrementMaxSize(
      -block_ceil(max_buffer_forward_ + max_buffer_backward_));
  multibuffer_->PinRange(block(pos_ - max_buffer_backward_),
                         block_ceil(pos_ + max_buffer_forward_), -1);
  multibuffer_->CleanupWriters(preload_pos_);
}

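// Converts a byte position into the id of the block that contains it,
// rounding down.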
MultiBuffer::BlockId MultiBufferReader::block(int64_t byte_pos) const {
  return byte_pos >> multibuffer_->block_size_shift();
}

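// As block(), but rounds up: returns the number of blocks needed to cover
// |byte_pos| bytes, which makes it suitable for exclusive end positions.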
MultiBuffer::BlockId MultiBufferReader::block_ceil(int64_t byte_pos) const {
  return block(byte_pos + (1LL << multibuffer_->block_size_shift()) - 1);
}

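// Moves the read position to |pos|, shifting the pinned range in the cache
// to the window around the new position and restarting preloading from it.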
void MultiBufferReader::Seek(int64_t pos) {
  DCHECK_GE(pos, 0);
  if (pos == pos_)
    return;
  // Use a rangemap to compute the diff in pinning.
  RangeMap<MultiBuffer::BlockId, int32_t> tmp;
  tmp.IncrementRange(block(pos_ - max_buffer_backward_),
                     block_ceil(pos_ + max_buffer_forward_), -1);
  tmp.IncrementRange(block(pos - max_buffer_backward_),
                     block_ceil(pos + max_buffer_forward_), 1);

  multibuffer_->PinRanges(tmp);

  multibuffer_->RemoveReader(preload_pos_, this);
  MultiBufferBlockId old_preload_pos = preload_pos_;
  preload_pos_ = block(pos);
  pos_ = pos;
  UpdateInternalState();
  multibuffer_->CleanupWriters(old_preload_pos);
}

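// Changes how many bytes are kept pinned behind (|backward|) and ahead of
// (|forward|) the read position, and adjusts the multibuffer's maximum size
// by the corresponding number of blocks.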
void MultiBufferReader::SetMaxBuffer(int64_t backward, int64_t forward) {
  // Safe, because we know this doesn't actually prune the cache right away.
  multibuffer_->IncrementMaxSize(
      -block_ceil(max_buffer_forward_ + max_buffer_backward_));
  // Use a rangemap to compute the diff in pinning.
  RangeMap<MultiBuffer::BlockId, int32_t> tmp;
  tmp.IncrementRange(block(pos_ - max_buffer_backward_),
                     block_ceil(pos_ + max_buffer_forward_), -1);
  max_buffer_backward_ = backward;
  max_buffer_forward_ = forward;
  tmp.IncrementRange(block(pos_ - max_buffer_backward_),
                     block_ceil(pos_ + max_buffer_forward_), 1);
  multibuffer_->PinRanges(tmp);

  multibuffer_->IncrementMaxSize(
      block_ceil(max_buffer_forward_ + max_buffer_backward_));
}

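// Returns the number of contiguous bytes available for reading at the
// current position without waiting.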
int64_t MultiBufferReader::Available() const {
  int64_t unavailable_byte_pos =
      static_cast<int64_t>(multibuffer_->FindNextUnavailable(block(pos_)))
      << multibuffer_->block_size_shift();
  return std::max<int64_t>(0, unavailable_byte_pos - pos_);
}

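// Copies up to |len| bytes that are already in the cache into |data| without
// blocking, cancelling any pending Wait(). Returns the number of bytes
// copied and advances the read position past them.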
int64_t MultiBufferReader::TryRead(unsigned char* data, int64_t len) {
  DCHECK_GT(len, 0);
  current_wait_size_ = 0;
  cb_.Reset();
  DCHECK_LE(pos_ + len, end_);
  const MultiBuffer::DataMap& data_map = multibuffer_->map();
  MultiBuffer::DataMap::const_iterator i = data_map.find(block(pos_));
  int64_t p = pos_;
  int64_t bytes_read = 0;
  while (bytes_read < len) {
    if (i == data_map.end())
      break;
    if (i->first != block(p))
      break;
    if (i->second->end_of_stream())
      break;
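    // Copy as much as possible from the current block, starting at our
    // offset within it.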
    size_t offset = p & ((1LL << multibuffer_->block_size_shift()) - 1);
    size_t tocopy =
        std::min<size_t>(len - bytes_read, i->second->data_size() - offset);
    memcpy(data, i->second->data() + offset, tocopy);
    data += tocopy;
    bytes_read += tocopy;
    p += tocopy;
    ++i;
  }
  Seek(p);
  return bytes_read;
}

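// Waits until |len| bytes are available at the current position. Returns
// net::OK if they already are; otherwise remembers |cb| and returns
// net::ERR_IO_PENDING, running |cb| once enough data has been loaded.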
int MultiBufferReader::Wait(int64_t len, base::Closure cb) {
  DCHECK_LE(pos_ + len, end_);
  DCHECK_NE(Available(), -1);
  DCHECK_LE(len, max_buffer_forward_);
  current_wait_size_ = len;

  cb_.Reset();
  UpdateInternalState();

  if (Available() >= current_wait_size_) {
    return net::OK;
  } else {
    cb_ = cb;
    return net::ERR_IO_PENDING;
  }
}

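// Sets the preload window ahead of the read position: load until
// |preload_high| bytes are buffered, then do not resume loading until the
// amount buffered ahead drops below |preload_low|.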
void MultiBufferReader::SetPreload(int64_t preload_high, int64_t preload_low) {
  DCHECK_GE(preload_high, preload_low);
  multibuffer_->RemoveReader(preload_pos_, this);
  preload_pos_ = block(pos_);
  preload_high_ = preload_high;
  preload_low_ = preload_low;
  UpdateInternalState();
}

bool MultiBufferReader::IsLoading() const {
  return loading_;
}

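// If a Wait() is pending and enough data is now available, posts the stored
// callback to the current message loop (bound through a weak pointer so it
// is dropped if this reader is destroyed first).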
void MultiBufferReader::CheckWait() {
  if (!cb_.is_null() &&
      (Available() >= current_wait_size_ || Available() == -1)) {
    base::MessageLoop::current()->PostTask(
        FROM_HERE,
        base::Bind(&MultiBufferReader::Call, weak_factory_.GetWeakPtr(),
                   base::ResetAndReturn(&cb_)));
  }
}

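// Trampoline used with a weak pointer so that posted callbacks are dropped
// if this reader has been destroyed in the meantime.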
void MultiBufferReader::Call(const base::Closure& cb) const {
  cb.Run();
}

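// Called when the range of available blocks around this reader changes.
// Tightens |end_| if the end of stream has been seen and reports progress
// through |progress_callback_|.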
void MultiBufferReader::NotifyAvailableRange(
    const Range<MultiBufferBlockId>& range) {
  // Update end_ if we can.
  if (range.end > range.begin) {
    auto i = multibuffer_->map().find(range.end - 1);
    DCHECK(i != multibuffer_->map().end());
    if (i->second->end_of_stream()) {
      // This is an upper limit because the last block before the
      // end-of-stream marker is allowed to be smaller than the other blocks.
      int64_t size_upper_limit = static_cast<int64_t>(range.end)
                                 << multibuffer_->block_size_shift();
      end_ = std::min(end_, size_upper_limit);
    }
  }
  UpdateInternalState();
  if (!progress_callback_.is_null()) {
    base::MessageLoop::current()->PostTask(
        FROM_HERE,
        base::Bind(&MultiBufferReader::Call, weak_factory_.GetWeakPtr(),
                   base::Bind(progress_callback_,
                              static_cast<int64_t>(range.begin)
                                  << multibuffer_->block_size_shift(),
                              static_cast<int64_t>(range.end)
                                  << multibuffer_->block_size_shift())));
    // We may be destroyed, do not touch |this|.
  }
}

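// Recomputes the preload position and the loading state, and re-registers
// this reader with the multibuffer at the first block that still needs to
// be loaded.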
void MultiBufferReader::UpdateInternalState() {
  int64_t effective_preload = loading_ ? preload_high_ : preload_low_;

  loading_ = false;
  if (preload_pos_ == -1) {
    preload_pos_ = block(pos_);
    DCHECK_GE(preload_pos_, 0);
  }
  MultiBuffer::BlockId max_preload = block_ceil(
      std::min(end_, pos_ + std::max(effective_preload, current_wait_size_)));

  // Note that we might not have been added to the multibuffer;
  // removing ourselves is a no-op in that case.
  multibuffer_->RemoveReader(preload_pos_, this);

  // We explicitly allow preloading to go beyond the pinned region in the
  // cache. It only happens when we want to preload something into the disk
  // cache. Thus it is possible to have blocks between our current reading
  // position and preload_pos_ be unavailable. When we get a Seek() call
  // (possibly through TryRead()) we reset the preload_pos_ to the current
  // reading position, and preload_pos_ will become the first unavailable
  // block after our current reading position again.
  preload_pos_ = multibuffer_->FindNextUnavailable(preload_pos_);
  DCHECK_GE(preload_pos_, 0);

  DVLOG(3) << "UpdateInternalState"
           << " pp = " << preload_pos_
           << " block_ceil(end_) = " << block_ceil(end_) << " end_ = " << end_
           << " max_preload " << max_preload;

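  // If we have not reached the end of the stream, keep loading until the
  // preload target is reached; once it is, stay registered on the last
  // available block (presumably so that we keep receiving availability
  // updates for it).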
  if (preload_pos_ < block_ceil(end_)) {
    if (preload_pos_ < max_preload) {
      loading_ = true;
      multibuffer_->AddReader(preload_pos_, this);
    } else if (multibuffer_->Contains(preload_pos_ - 1)) {
      --preload_pos_;
      multibuffer_->AddReader(preload_pos_, this);
    }
  }
  CheckWait();
}

}  // namespace media