Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/media_source_state.h" | 5 #include "media/filters/media_source_state.h" |
| 6 | 6 |
| 7 #include <set> | |
| 8 | |
| 7 #include "base/callback_helpers.h" | 9 #include "base/callback_helpers.h" |
| 8 #include "base/command_line.h" | 10 #include "base/command_line.h" |
| 9 #include "base/stl_util.h" | 11 #include "base/stl_util.h" |
| 10 #include "base/strings/string_number_conversions.h" | 12 #include "base/strings/string_number_conversions.h" |
| 11 #include "media/base/media_switches.h" | 13 #include "media/base/media_switches.h" |
| 12 #include "media/base/media_track.h" | 14 #include "media/base/media_track.h" |
| 13 #include "media/base/media_tracks.h" | 15 #include "media/base/media_tracks.h" |
| 16 #include "media/base/mime_util.h" | |
| 14 #include "media/filters/chunk_demuxer.h" | 17 #include "media/filters/chunk_demuxer.h" |
| 15 #include "media/filters/frame_processor.h" | 18 #include "media/filters/frame_processor.h" |
| 16 #include "media/filters/source_buffer_stream.h" | 19 #include "media/filters/source_buffer_stream.h" |
| 17 | 20 |
| 18 namespace media { | 21 namespace media { |
| 19 | 22 |
| 20 enum { | 23 enum { |
| 21 // Limits the number of MEDIA_LOG() calls warning the user that a muxed stream | 24 // Limits the number of MEDIA_LOG() calls warning the user that a muxed stream |
| 22 // media segment is missing a block from at least one of the audio or video | 25 // media segment is missing a block from at least one of the audio or video |
| 23 // tracks. | 26 // tracks. |
| 24 kMaxMissingTrackInSegmentLogs = 10, | 27 kMaxMissingTrackInSegmentLogs = 10, |
| 25 }; | 28 }; |
| 26 | 29 |
| 27 static TimeDelta EndTimestamp(const StreamParser::BufferQueue& queue) { | 30 namespace { |
| 31 | |
| 32 TimeDelta EndTimestamp(const StreamParser::BufferQueue& queue) { | |
| 28 return queue.back()->timestamp() + queue.back()->duration(); | 33 return queue.back()->timestamp() + queue.back()->duration(); |
| 29 } | 34 } |
| 30 | 35 |
| 36 const char* ToStr(MediaTrack::Type type) { | |
|
wolenetz
2016/09/13 21:03:14
nit: useful elsewhere? If so, move to some method
servolk
2016/09/14 18:15:25
Done.
| |
| 37 switch (type) { | |
| 38 case MediaTrack::Audio: | |
| 39 return "audio"; | |
| 40 case MediaTrack::Text: | |
| 41 return "text"; | |
| 42 case MediaTrack::Video: | |
| 43 return "video"; | |
| 44 } | |
| 45 NOTREACHED(); | |
| 46 return "INVALID"; | |
| 47 } | |
| 48 | |
| 49 } // namespace | |
| 50 | |
| 31 // List of time ranges for each SourceBuffer. | 51 // List of time ranges for each SourceBuffer. |
| 32 // static | 52 // static |
| 33 Ranges<TimeDelta> MediaSourceState::ComputeRangesIntersection( | 53 Ranges<TimeDelta> MediaSourceState::ComputeRangesIntersection( |
| 34 const RangesList& active_ranges, | 54 const RangesList& active_ranges, |
| 35 bool ended) { | 55 bool ended) { |
| 36 // TODO(servolk): Perhaps this can be removed in favor of blink implementation | 56 // TODO(servolk): Perhaps this can be removed in favor of blink implementation |
| 37 // (MediaSource::buffered)? Currently this is only used on Android and for | 57 // (MediaSource::buffered)? Currently this is only used on Android and for |
| 38 // updating DemuxerHost's buffered ranges during AppendData() as well as | 58 // updating DemuxerHost's buffered ranges during AppendData() as well as |
| 39 // SourceBuffer.buffered property implemetation. | 59 // SourceBuffer.buffered property implemetation. |
| 40 // Implementation of HTMLMediaElement.buffered algorithm in MSE spec. | 60 // Implementation of HTMLMediaElement.buffered algorithm in MSE spec. |
| (...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 87 } | 107 } |
| 88 | 108 |
| 89 MediaSourceState::MediaSourceState( | 109 MediaSourceState::MediaSourceState( |
| 90 std::unique_ptr<StreamParser> stream_parser, | 110 std::unique_ptr<StreamParser> stream_parser, |
| 91 std::unique_ptr<FrameProcessor> frame_processor, | 111 std::unique_ptr<FrameProcessor> frame_processor, |
| 92 const CreateDemuxerStreamCB& create_demuxer_stream_cb, | 112 const CreateDemuxerStreamCB& create_demuxer_stream_cb, |
| 93 const scoped_refptr<MediaLog>& media_log) | 113 const scoped_refptr<MediaLog>& media_log) |
| 94 : create_demuxer_stream_cb_(create_demuxer_stream_cb), | 114 : create_demuxer_stream_cb_(create_demuxer_stream_cb), |
| 95 timestamp_offset_during_append_(NULL), | 115 timestamp_offset_during_append_(NULL), |
| 96 parsing_media_segment_(false), | 116 parsing_media_segment_(false), |
| 97 media_segment_contained_audio_frame_(false), | |
| 98 media_segment_contained_video_frame_(false), | |
| 99 stream_parser_(stream_parser.release()), | 117 stream_parser_(stream_parser.release()), |
| 100 audio_(NULL), | |
| 101 video_(NULL), | |
| 102 frame_processor_(frame_processor.release()), | 118 frame_processor_(frame_processor.release()), |
| 103 media_log_(media_log), | 119 media_log_(media_log), |
| 104 state_(UNINITIALIZED), | 120 state_(UNINITIALIZED), |
| 105 auto_update_timestamp_offset_(false) { | 121 auto_update_timestamp_offset_(false) { |
| 106 DCHECK(!create_demuxer_stream_cb_.is_null()); | 122 DCHECK(!create_demuxer_stream_cb_.is_null()); |
| 107 DCHECK(frame_processor_); | 123 DCHECK(frame_processor_); |
| 108 } | 124 } |
| 109 | 125 |
| 110 MediaSourceState::~MediaSourceState() { | 126 MediaSourceState::~MediaSourceState() { |
| 111 Shutdown(); | 127 Shutdown(); |
| 112 | 128 |
| 113 base::STLDeleteValues(&text_stream_map_); | 129 base::STLDeleteValues(&text_stream_map_); |
| 114 } | 130 } |
| 115 | 131 |
| 116 void MediaSourceState::Init( | 132 void MediaSourceState::Init( |
| 117 const StreamParser::InitCB& init_cb, | 133 const StreamParser::InitCB& init_cb, |
| 118 bool allow_audio, | 134 const std::string& expected_codecs, |
| 119 bool allow_video, | |
| 120 const StreamParser::EncryptedMediaInitDataCB& encrypted_media_init_data_cb, | 135 const StreamParser::EncryptedMediaInitDataCB& encrypted_media_init_data_cb, |
| 121 const NewTextTrackCB& new_text_track_cb) { | 136 const NewTextTrackCB& new_text_track_cb) { |
| 122 DCHECK_EQ(state_, UNINITIALIZED); | 137 DCHECK_EQ(state_, UNINITIALIZED); |
| 123 new_text_track_cb_ = new_text_track_cb; | 138 new_text_track_cb_ = new_text_track_cb; |
| 124 init_cb_ = init_cb; | 139 init_cb_ = init_cb; |
| 125 | 140 |
| 126 state_ = PENDING_PARSER_CONFIG; | 141 state_ = PENDING_PARSER_CONFIG; |
| 127 stream_parser_->Init( | 142 stream_parser_->Init( |
| 128 base::Bind(&MediaSourceState::OnSourceInitDone, base::Unretained(this)), | 143 base::Bind(&MediaSourceState::OnSourceInitDone, base::Unretained(this)), |
| 129 base::Bind(&MediaSourceState::OnNewConfigs, base::Unretained(this), | 144 base::Bind(&MediaSourceState::OnNewConfigs, base::Unretained(this), |
| 130 allow_audio, allow_video), | 145 expected_codecs), |
| 131 base::Bind(&MediaSourceState::OnNewBuffers, base::Unretained(this)), | 146 base::Bind(&MediaSourceState::OnNewBuffers, base::Unretained(this)), |
| 132 new_text_track_cb_.is_null(), encrypted_media_init_data_cb, | 147 new_text_track_cb_.is_null(), encrypted_media_init_data_cb, |
| 133 base::Bind(&MediaSourceState::OnNewMediaSegment, base::Unretained(this)), | 148 base::Bind(&MediaSourceState::OnNewMediaSegment, base::Unretained(this)), |
| 134 base::Bind(&MediaSourceState::OnEndOfMediaSegment, | 149 base::Bind(&MediaSourceState::OnEndOfMediaSegment, |
| 135 base::Unretained(this)), | 150 base::Unretained(this)), |
| 136 media_log_); | 151 media_log_); |
| 137 } | 152 } |
| 138 | 153 |
| 139 void MediaSourceState::SetSequenceMode(bool sequence_mode) { | 154 void MediaSourceState::SetSequenceMode(bool sequence_mode) { |
| 140 DCHECK(!parsing_media_segment_); | 155 DCHECK(!parsing_media_segment_); |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 189 DCHECK(!timestamp_offset_during_append_); | 204 DCHECK(!timestamp_offset_during_append_); |
| 190 timestamp_offset_during_append_ = timestamp_offset; | 205 timestamp_offset_during_append_ = timestamp_offset; |
| 191 append_window_start_during_append_ = append_window_start; | 206 append_window_start_during_append_ = append_window_start; |
| 192 append_window_end_during_append_ = append_window_end; | 207 append_window_end_during_append_ = append_window_end; |
| 193 | 208 |
| 194 stream_parser_->Flush(); | 209 stream_parser_->Flush(); |
| 195 timestamp_offset_during_append_ = NULL; | 210 timestamp_offset_during_append_ = NULL; |
| 196 | 211 |
| 197 frame_processor_->Reset(); | 212 frame_processor_->Reset(); |
| 198 parsing_media_segment_ = false; | 213 parsing_media_segment_ = false; |
| 199 media_segment_contained_audio_frame_ = false; | 214 media_segment_has_data_for_track_.clear(); |
| 200 media_segment_contained_video_frame_ = false; | |
| 201 } | 215 } |
| 202 | 216 |
| 203 void MediaSourceState::Remove(TimeDelta start, | 217 void MediaSourceState::Remove(TimeDelta start, |
| 204 TimeDelta end, | 218 TimeDelta end, |
| 205 TimeDelta duration) { | 219 TimeDelta duration) { |
| 206 if (audio_) | 220 for (const auto& it : audio_streams_) { |
| 207 audio_->Remove(start, end, duration); | 221 it.second->Remove(start, end, duration); |
| 222 } | |
| 208 | 223 |
| 209 if (video_) | 224 for (const auto& it : video_streams_) { |
| 210 video_->Remove(start, end, duration); | 225 it.second->Remove(start, end, duration); |
| 226 } | |
| 211 | 227 |
| 212 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 228 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 213 itr != text_stream_map_.end(); ++itr) { | 229 itr != text_stream_map_.end(); ++itr) { |
| 214 itr->second->Remove(start, end, duration); | 230 itr->second->Remove(start, end, duration); |
| 215 } | 231 } |
| 216 } | 232 } |
| 217 | 233 |
| 218 size_t MediaSourceState::EstimateVideoDataSize( | |
| 219 size_t muxed_data_chunk_size) const { | |
| 220 DCHECK(audio_); | |
| 221 DCHECK(video_); | |
| 222 | |
| 223 size_t videoBufferedSize = video_->GetBufferedSize(); | |
| 224 size_t audioBufferedSize = audio_->GetBufferedSize(); | |
| 225 if (videoBufferedSize == 0 || audioBufferedSize == 0) { | |
| 226 // At this point either audio or video buffer is empty, which means buffer | |
| 227 // levels are probably low anyway and we should have enough space in the | |
| 228 // buffers for appending new data, so just take a very rough guess. | |
| 229 return muxed_data_chunk_size * 7 / 8; | |
| 230 } | |
| 231 | |
| 232 // We need to estimate how much audio and video data is going to be in the | |
| 233 // newly appended data chunk to make space for the new data. And we need to do | |
| 234 // that without parsing the data (which will happen later, in the Append | |
| 235 // phase). So for now we can only rely on some heuristic here. Let's assume | |
| 236 // that the proportion of the audio/video in the new data chunk is the same as | |
| 237 // the current ratio of buffered audio/video. | |
| 238 // Longer term this should go away once we further change the MSE GC algorithm | |
| 239 // to work across all streams of a SourceBuffer (see crbug.com/520704). | |
| 240 double videoBufferedSizeF = static_cast<double>(videoBufferedSize); | |
| 241 double audioBufferedSizeF = static_cast<double>(audioBufferedSize); | |
| 242 | |
| 243 double totalBufferedSizeF = videoBufferedSizeF + audioBufferedSizeF; | |
| 244 CHECK_GT(totalBufferedSizeF, 0.0); | |
| 245 | |
| 246 double videoRatio = videoBufferedSizeF / totalBufferedSizeF; | |
| 247 CHECK_GE(videoRatio, 0.0); | |
| 248 CHECK_LE(videoRatio, 1.0); | |
| 249 double estimatedVideoSize = muxed_data_chunk_size * videoRatio; | |
| 250 return static_cast<size_t>(estimatedVideoSize); | |
| 251 } | |
| 252 | |
| 253 bool MediaSourceState::EvictCodedFrames(DecodeTimestamp media_time, | 234 bool MediaSourceState::EvictCodedFrames(DecodeTimestamp media_time, |
| 254 size_t newDataSize) { | 235 size_t newDataSize) { |
| 255 bool success = true; | 236 bool success = true; |
| 256 | 237 |
| 257 DVLOG(3) << __func__ << " media_time=" << media_time.InSecondsF() | 238 DVLOG(3) << __func__ << " media_time=" << media_time.InSecondsF() |
| 258 << " newDataSize=" << newDataSize | 239 << " newDataSize=" << newDataSize; |
| 259 << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0) | |
| 260 << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0); | |
| 261 | 240 |
| 262 size_t newAudioSize = 0; | 241 DVLOG(4) << "Before EvictCodedFrames:"; |
| 263 size_t newVideoSize = 0; | 242 for (const auto& it : audio_streams_) { |
| 264 if (audio_ && video_) { | 243 DVLOG(4) << "Audio track_id=" << it.second->media_track_id() |
| 265 newVideoSize = EstimateVideoDataSize(newDataSize); | 244 << " buffered_size=" << it.second->GetBufferedSize(); |
| 266 newAudioSize = newDataSize - newVideoSize; | 245 } |
| 267 } else if (video_) { | 246 for (const auto& it : video_streams_) { |
| 268 newVideoSize = newDataSize; | 247 DVLOG(4) << "Video track_id=" << it.second->media_track_id() |
| 269 } else if (audio_) { | 248 << " buffered_size=" << it.second->GetBufferedSize(); |
| 270 newAudioSize = newDataSize; | |
| 271 } | 249 } |
| 272 | 250 |
| 273 DVLOG(3) << __func__ | 251 size_t estimatedAudioSize = newDataSize; |
| 274 << " estimated audio/video sizes: newVideoSize=" << newVideoSize | 252 size_t estimatedVideoSize = newDataSize; |
| 275 << " newAudioSize=" << newAudioSize; | 253 if (!audio_streams_.empty() && !video_streams_.empty()) { |
| 254 estimatedAudioSize = newDataSize / 16; | |
|
wolenetz
2016/09/13 21:03:14
nit: Assumption is overall, across all A+V tracks
servolk
2016/09/14 18:15:26
Yeah, after pondering this a bit more, I think we
wolenetz
2016/09/14 23:31:21
Acknowledged.
| |
| 255 estimatedVideoSize = newDataSize - estimatedAudioSize; | |
| 256 } | |
| 257 if (audio_streams_.size() > 0) | |
| 258 estimatedAudioSize /= audio_streams_.size(); | |
| 259 if (video_streams_.size() > 0) | |
| 260 estimatedVideoSize /= video_streams_.size(); | |
| 276 | 261 |
| 277 if (audio_) | 262 for (const auto& it : audio_streams_) { |
| 278 success = audio_->EvictCodedFrames(media_time, newAudioSize) && success; | 263 success &= it.second->EvictCodedFrames(media_time, estimatedAudioSize); |
| 279 | 264 } |
| 280 if (video_) | 265 for (const auto& it : video_streams_) { |
| 281 success = video_->EvictCodedFrames(media_time, newVideoSize) && success; | 266 success &= it.second->EvictCodedFrames(media_time, estimatedVideoSize); |
| 267 } | |
| 282 | 268 |
| 283 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 269 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 284 itr != text_stream_map_.end(); ++itr) { | 270 itr != text_stream_map_.end(); ++itr) { |
| 285 success = itr->second->EvictCodedFrames(media_time, 0) && success; | 271 success &= itr->second->EvictCodedFrames(media_time, 0); |
| 286 } | 272 } |
| 287 | 273 |
| 288 DVLOG(3) << __func__ << " result=" << success | 274 DVLOG(4) << "After EvictCodedFrames (success=" << success << "):"; |
| 289 << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0) | 275 for (const auto& it : audio_streams_) { |
| 290 << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0); | 276 DVLOG(4) << "Audio track_id=" << it.second->media_track_id() |
| 277 << " buffered_size=" << it.second->GetBufferedSize(); | |
| 278 } | |
| 279 for (const auto& it : video_streams_) { | |
| 280 DVLOG(4) << "Video track_id=" << it.second->media_track_id() | |
| 281 << " buffered_size=" << it.second->GetBufferedSize(); | |
| 282 } | |
| 291 | 283 |
| 292 return success; | 284 return success; |
| 293 } | 285 } |
| 294 | 286 |
| 295 Ranges<TimeDelta> MediaSourceState::GetBufferedRanges(TimeDelta duration, | 287 Ranges<TimeDelta> MediaSourceState::GetBufferedRanges(TimeDelta duration, |
|
wolenetz
2016/09/13 21:03:14
hmm. I think that TODO was wrong. SourceBuffer.buf
servolk
2016/09/14 18:15:26
Ok, in that case we can just drop the check for en
| |
| 296 bool ended) const { | 288 bool ended) const { |
| 297 // TODO(acolwell): When we start allowing disabled tracks we'll need to update | |
| 298 // this code to only add ranges from active tracks. | |
| 299 RangesList ranges_list; | 289 RangesList ranges_list; |
| 300 if (audio_) | 290 for (const auto& it : audio_streams_) { |
| 301 ranges_list.push_back(audio_->GetBufferedRanges(duration)); | 291 if (it.second->enabled()) |
| 292 ranges_list.push_back(it.second->GetBufferedRanges(duration)); | |
| 293 } | |
| 302 | 294 |
| 303 if (video_) | 295 for (const auto& it : video_streams_) { |
| 304 ranges_list.push_back(video_->GetBufferedRanges(duration)); | 296 if (it.second->enabled()) |
| 297 ranges_list.push_back(it.second->GetBufferedRanges(duration)); | |
| 298 } | |
| 305 | 299 |
| 306 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); | 300 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); |
| 307 itr != text_stream_map_.end(); ++itr) { | 301 itr != text_stream_map_.end(); ++itr) { |
| 308 ranges_list.push_back(itr->second->GetBufferedRanges(duration)); | 302 ranges_list.push_back(itr->second->GetBufferedRanges(duration)); |
| 309 } | 303 } |
| 310 | 304 |
| 311 return ComputeRangesIntersection(ranges_list, ended); | 305 return ComputeRangesIntersection(ranges_list, ended); |
| 312 } | 306 } |
| 313 | 307 |
| 314 TimeDelta MediaSourceState::GetHighestPresentationTimestamp() const { | 308 TimeDelta MediaSourceState::GetHighestPresentationTimestamp() const { |
| 315 TimeDelta max_pts; | 309 TimeDelta max_pts; |
| 316 | 310 |
| 317 if (audio_) | 311 for (const auto& it : audio_streams_) { |
| 318 max_pts = std::max(max_pts, audio_->GetHighestPresentationTimestamp()); | 312 max_pts = std::max(max_pts, it.second->GetHighestPresentationTimestamp()); |
| 313 } | |
| 319 | 314 |
| 320 if (video_) | 315 for (const auto& it : video_streams_) { |
| 321 max_pts = std::max(max_pts, video_->GetHighestPresentationTimestamp()); | 316 max_pts = std::max(max_pts, it.second->GetHighestPresentationTimestamp()); |
| 317 } | |
| 322 | 318 |
| 323 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); | 319 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); |
| 324 itr != text_stream_map_.end(); ++itr) { | 320 itr != text_stream_map_.end(); ++itr) { |
| 325 max_pts = std::max(max_pts, itr->second->GetHighestPresentationTimestamp()); | 321 max_pts = std::max(max_pts, itr->second->GetHighestPresentationTimestamp()); |
| 326 } | 322 } |
| 327 | 323 |
| 328 return max_pts; | 324 return max_pts; |
| 329 } | 325 } |
| 330 | 326 |
| 331 TimeDelta MediaSourceState::GetMaxBufferedDuration() const { | 327 TimeDelta MediaSourceState::GetMaxBufferedDuration() const { |
| 332 TimeDelta max_duration; | 328 TimeDelta max_duration; |
| 333 | 329 |
| 334 if (audio_) | 330 for (const auto& it : audio_streams_) { |
| 335 max_duration = std::max(max_duration, audio_->GetBufferedDuration()); | 331 max_duration = std::max(max_duration, it.second->GetBufferedDuration()); |
| 332 } | |
| 336 | 333 |
| 337 if (video_) | 334 for (const auto& it : video_streams_) { |
| 338 max_duration = std::max(max_duration, video_->GetBufferedDuration()); | 335 max_duration = std::max(max_duration, it.second->GetBufferedDuration()); |
| 336 } | |
| 339 | 337 |
| 340 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); | 338 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); |
| 341 itr != text_stream_map_.end(); ++itr) { | 339 itr != text_stream_map_.end(); ++itr) { |
| 342 max_duration = std::max(max_duration, itr->second->GetBufferedDuration()); | 340 max_duration = std::max(max_duration, itr->second->GetBufferedDuration()); |
| 343 } | 341 } |
| 344 | 342 |
| 345 return max_duration; | 343 return max_duration; |
| 346 } | 344 } |
| 347 | 345 |
| 348 void MediaSourceState::StartReturningData() { | 346 void MediaSourceState::StartReturningData() { |
| 349 if (audio_) | 347 for (const auto& it : audio_streams_) { |
| 350 audio_->StartReturningData(); | 348 it.second->StartReturningData(); |
| 349 } | |
| 351 | 350 |
| 352 if (video_) | 351 for (const auto& it : video_streams_) { |
| 353 video_->StartReturningData(); | 352 it.second->StartReturningData(); |
| 353 } | |
| 354 | 354 |
| 355 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 355 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 356 itr != text_stream_map_.end(); ++itr) { | 356 itr != text_stream_map_.end(); ++itr) { |
| 357 itr->second->StartReturningData(); | 357 itr->second->StartReturningData(); |
| 358 } | 358 } |
| 359 } | 359 } |
| 360 | 360 |
| 361 void MediaSourceState::AbortReads() { | 361 void MediaSourceState::AbortReads() { |
| 362 if (audio_) | 362 for (const auto& it : audio_streams_) { |
| 363 audio_->AbortReads(); | 363 it.second->AbortReads(); |
| 364 } | |
| 364 | 365 |
| 365 if (video_) | 366 for (const auto& it : video_streams_) { |
| 366 video_->AbortReads(); | 367 it.second->AbortReads(); |
| 368 } | |
| 367 | 369 |
| 368 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 370 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 369 itr != text_stream_map_.end(); ++itr) { | 371 itr != text_stream_map_.end(); ++itr) { |
| 370 itr->second->AbortReads(); | 372 itr->second->AbortReads(); |
| 371 } | 373 } |
| 372 } | 374 } |
| 373 | 375 |
| 374 void MediaSourceState::Seek(TimeDelta seek_time) { | 376 void MediaSourceState::Seek(TimeDelta seek_time) { |
| 375 if (audio_) | 377 for (const auto& it : audio_streams_) { |
| 376 audio_->Seek(seek_time); | 378 it.second->Seek(seek_time); |
| 379 } | |
| 377 | 380 |
| 378 if (video_) | 381 for (const auto& it : video_streams_) { |
| 379 video_->Seek(seek_time); | 382 it.second->Seek(seek_time); |
| 383 } | |
| 380 | 384 |
| 381 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 385 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 382 itr != text_stream_map_.end(); ++itr) { | 386 itr != text_stream_map_.end(); ++itr) { |
| 383 itr->second->Seek(seek_time); | 387 itr->second->Seek(seek_time); |
| 384 } | 388 } |
| 385 } | 389 } |
| 386 | 390 |
| 387 void MediaSourceState::CompletePendingReadIfPossible() { | 391 void MediaSourceState::CompletePendingReadIfPossible() { |
| 388 if (audio_) | 392 for (const auto& it : audio_streams_) { |
| 389 audio_->CompletePendingReadIfPossible(); | 393 it.second->CompletePendingReadIfPossible(); |
| 394 } | |
| 390 | 395 |
| 391 if (video_) | 396 for (const auto& it : video_streams_) { |
| 392 video_->CompletePendingReadIfPossible(); | 397 it.second->CompletePendingReadIfPossible(); |
| 398 } | |
| 393 | 399 |
| 394 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 400 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 395 itr != text_stream_map_.end(); ++itr) { | 401 itr != text_stream_map_.end(); ++itr) { |
| 396 itr->second->CompletePendingReadIfPossible(); | 402 itr->second->CompletePendingReadIfPossible(); |
| 397 } | 403 } |
| 398 } | 404 } |
| 399 | 405 |
| 400 void MediaSourceState::OnSetDuration(TimeDelta duration) { | 406 void MediaSourceState::OnSetDuration(TimeDelta duration) { |
| 401 if (audio_) | 407 for (const auto& it : audio_streams_) { |
| 402 audio_->OnSetDuration(duration); | 408 it.second->OnSetDuration(duration); |
| 409 } | |
| 403 | 410 |
| 404 if (video_) | 411 for (const auto& it : video_streams_) { |
| 405 video_->OnSetDuration(duration); | 412 it.second->OnSetDuration(duration); |
| 413 } | |
| 406 | 414 |
| 407 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 415 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 408 itr != text_stream_map_.end(); ++itr) { | 416 itr != text_stream_map_.end(); ++itr) { |
| 409 itr->second->OnSetDuration(duration); | 417 itr->second->OnSetDuration(duration); |
| 410 } | 418 } |
| 411 } | 419 } |
| 412 | 420 |
| 413 void MediaSourceState::MarkEndOfStream() { | 421 void MediaSourceState::MarkEndOfStream() { |
| 414 if (audio_) | 422 for (const auto& it : audio_streams_) { |
| 415 audio_->MarkEndOfStream(); | 423 it.second->MarkEndOfStream(); |
| 424 } | |
| 416 | 425 |
| 417 if (video_) | 426 for (const auto& it : video_streams_) { |
| 418 video_->MarkEndOfStream(); | 427 it.second->MarkEndOfStream(); |
| 428 } | |
| 419 | 429 |
| 420 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 430 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 421 itr != text_stream_map_.end(); ++itr) { | 431 itr != text_stream_map_.end(); ++itr) { |
| 422 itr->second->MarkEndOfStream(); | 432 itr->second->MarkEndOfStream(); |
| 423 } | 433 } |
| 424 } | 434 } |
| 425 | 435 |
| 426 void MediaSourceState::UnmarkEndOfStream() { | 436 void MediaSourceState::UnmarkEndOfStream() { |
| 427 if (audio_) | 437 for (const auto& it : audio_streams_) { |
| 428 audio_->UnmarkEndOfStream(); | 438 it.second->UnmarkEndOfStream(); |
| 439 } | |
| 429 | 440 |
| 430 if (video_) | 441 for (const auto& it : video_streams_) { |
| 431 video_->UnmarkEndOfStream(); | 442 it.second->UnmarkEndOfStream(); |
| 443 } | |
| 432 | 444 |
| 433 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 445 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 434 itr != text_stream_map_.end(); ++itr) { | 446 itr != text_stream_map_.end(); ++itr) { |
| 435 itr->second->UnmarkEndOfStream(); | 447 itr->second->UnmarkEndOfStream(); |
| 436 } | 448 } |
| 437 } | 449 } |
| 438 | 450 |
| 439 void MediaSourceState::Shutdown() { | 451 void MediaSourceState::Shutdown() { |
| 440 if (audio_) | 452 for (const auto& it : audio_streams_) { |
| 441 audio_->Shutdown(); | 453 it.second->Shutdown(); |
| 454 } | |
| 442 | 455 |
| 443 if (video_) | 456 for (const auto& it : video_streams_) { |
| 444 video_->Shutdown(); | 457 it.second->Shutdown(); |
| 458 } | |
| 445 | 459 |
| 446 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 460 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 447 itr != text_stream_map_.end(); ++itr) { | 461 itr != text_stream_map_.end(); ++itr) { |
| 448 itr->second->Shutdown(); | 462 itr->second->Shutdown(); |
| 449 } | 463 } |
| 450 } | 464 } |
| 451 | 465 |
| 452 void MediaSourceState::SetMemoryLimits(DemuxerStream::Type type, | 466 void MediaSourceState::SetMemoryLimits(DemuxerStream::Type type, |
|
wolenetz
2016/09/13 21:03:14
nit:ForTest here, in ChunkDemuxer, and in Pipeline
servolk
2016/09/14 18:15:26
Well, because of the cmdline flags I think we shou
wolenetz
2016/09/14 23:31:21
Unless I'm missing something new in this CL, or cs
servolk
2016/09/15 00:18:32
Ah, ok, you are right, I've got confused, thought
| |
| 453 size_t memory_limit) { | 467 size_t memory_limit) { |
| 454 switch (type) { | 468 switch (type) { |
| 455 case DemuxerStream::AUDIO: | 469 case DemuxerStream::AUDIO: |
| 456 if (audio_) | 470 for (const auto& it : audio_streams_) { |
| 457 audio_->SetStreamMemoryLimit(memory_limit); | 471 it.second->SetStreamMemoryLimit(memory_limit); |
| 472 } | |
| 458 break; | 473 break; |
| 459 case DemuxerStream::VIDEO: | 474 case DemuxerStream::VIDEO: |
| 460 if (video_) | 475 for (const auto& it : video_streams_) { |
| 461 video_->SetStreamMemoryLimit(memory_limit); | 476 it.second->SetStreamMemoryLimit(memory_limit); |
| 477 } | |
| 462 break; | 478 break; |
| 463 case DemuxerStream::TEXT: | 479 case DemuxerStream::TEXT: |
| 464 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 480 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 465 itr != text_stream_map_.end(); ++itr) { | 481 itr != text_stream_map_.end(); ++itr) { |
| 466 itr->second->SetStreamMemoryLimit(memory_limit); | 482 itr->second->SetStreamMemoryLimit(memory_limit); |
| 467 } | 483 } |
| 468 break; | 484 break; |
| 469 case DemuxerStream::UNKNOWN: | 485 case DemuxerStream::UNKNOWN: |
| 470 case DemuxerStream::NUM_TYPES: | 486 case DemuxerStream::NUM_TYPES: |
| 471 NOTREACHED(); | 487 NOTREACHED(); |
| 472 break; | 488 break; |
| 473 } | 489 } |
| 474 } | 490 } |
| 475 | 491 |
| 476 bool MediaSourceState::IsSeekWaitingForData() const { | 492 bool MediaSourceState::IsSeekWaitingForData() const { |
| 477 if (audio_ && audio_->IsSeekWaitingForData()) | 493 for (const auto& it : audio_streams_) { |
| 478 return true; | 494 if (it.second->IsSeekWaitingForData()) |
| 495 return true; | |
| 496 } | |
| 479 | 497 |
| 480 if (video_ && video_->IsSeekWaitingForData()) | 498 for (const auto& it : video_streams_) { |
| 481 return true; | 499 if (it.second->IsSeekWaitingForData()) |
| 500 return true; | |
| 501 } | |
| 482 | 502 |
| 483 // NOTE: We are intentionally not checking the text tracks | 503 // NOTE: We are intentionally not checking the text tracks |
| 484 // because text tracks are discontinuous and may not have data | 504 // because text tracks are discontinuous and may not have data |
| 485 // for the seek position. This is ok and playback should not be | 505 // for the seek position. This is ok and playback should not be |
| 486 // stalled because we don't have cues. If cues, with timestamps after | 506 // stalled because we don't have cues. If cues, with timestamps after |
| 487 // the seek time, eventually arrive they will be delivered properly | 507 // the seek time, eventually arrive they will be delivered properly |
| 488 // in response to ChunkDemuxerStream::Read() calls. | 508 // in response to ChunkDemuxerStream::Read() calls. |
| 489 | 509 |
| 490 return false; | 510 return false; |
| 491 } | 511 } |
| 492 | 512 |
| 513 bool CheckBytestreamTrackIds( | |
|
wolenetz
2016/09/13 21:03:14
move to anon namespace at top of file
servolk
2016/09/14 18:15:25
Done.
| |
| 514 const MediaTracks& tracks, | |
| 515 const StreamParser::TextTrackConfigMap& text_configs) { | |
| 516 std::set<StreamParser::TrackId> bytestream_ids; | |
| 517 for (const auto& track : tracks.tracks()) { | |
| 518 const StreamParser::TrackId& track_id = track->bytestream_track_id(); | |
| 519 if (bytestream_ids.find(track_id) != bytestream_ids.end()) { | |
| 520 return false; | |
| 521 } | |
| 522 bytestream_ids.insert(track_id); | |
| 523 } | |
| 524 for (const auto& text_track : text_configs) { | |
| 525 const StreamParser::TrackId& track_id = text_track.first; | |
| 526 if (bytestream_ids.find(track_id) != bytestream_ids.end()) { | |
| 527 return false; | |
| 528 } | |
| 529 bytestream_ids.insert(track_id); | |
| 530 } | |
| 531 return true; | |
| 532 } | |
| 533 | |
| 493 bool MediaSourceState::OnNewConfigs( | 534 bool MediaSourceState::OnNewConfigs( |
| 494 bool allow_audio, | 535 std::string expected_codecs, |
| 495 bool allow_video, | |
| 496 std::unique_ptr<MediaTracks> tracks, | 536 std::unique_ptr<MediaTracks> tracks, |
| 497 const StreamParser::TextTrackConfigMap& text_configs) { | 537 const StreamParser::TextTrackConfigMap& text_configs) { |
| 538 DCHECK(tracks.get()); | |
| 539 DVLOG(1) << __func__ << " expected_codecs=" << expected_codecs | |
| 540 << " tracks=" << tracks->tracks().size(); | |
| 498 DCHECK_GE(state_, PENDING_PARSER_CONFIG); | 541 DCHECK_GE(state_, PENDING_PARSER_CONFIG); |
| 499 DCHECK(tracks.get()); | 542 |
| 500 | 543 // Check that there is no clashing bytestream track ids. |
| 501 MediaTrack* audio_track = nullptr; | 544 if (!CheckBytestreamTrackIds(*tracks, text_configs)) { |
| 502 MediaTrack* video_track = nullptr; | 545 MEDIA_LOG(ERROR, media_log_) |
| 503 AudioDecoderConfig audio_config; | 546 << "Error: duplicate bytestream track ids detected"; |
|
wolenetz
2016/09/13 21:03:13
nit: s/Error: duplicate/Duplicate/ (ERROR already
servolk
2016/09/14 18:15:26
Done.
| |
| 504 VideoDecoderConfig video_config; | 547 for (const auto& track : tracks->tracks()) { |
| 548 const StreamParser::TrackId& track_id = track->bytestream_track_id(); | |
| 549 MEDIA_LOG(ERROR, media_log_) << ToStr(track->type()) << " track " | |
|
wolenetz
2016/09/13 21:03:13
nit:s/ERROR/DEBUG/ so we can expose the last cache
servolk
2016/09/14 18:15:25
Done.
| |
| 550 << " bytestream track id=" << track_id; | |
| 551 } | |
| 552 return false; | |
| 553 } | |
| 554 | |
| 555 // MSE spec allows new configs to be emitted only during Append, but not | |
| 556 // during Flush or parser reset operations. | |
| 557 CHECK(append_in_progress_); | |
| 558 | |
| 559 bool success = true; | |
| 560 | |
| 561 std::vector<std::string> expected_codecs_parsed; | |
| 562 ParseCodecString(expected_codecs, &expected_codecs_parsed, false); | |
|
wolenetz
2016/09/13 21:03:14
nit: Every init segment, we parse these? Can we in
servolk
2016/09/14 18:15:25
Yeah, I don't think it's going to make a big diffe
wolenetz
2016/09/14 23:31:21
Acknowledged.
| |
| 563 | |
| 564 std::vector<AudioCodec> expected_acodecs; | |
| 565 std::vector<VideoCodec> expected_vcodecs; | |
| 566 for (const auto& codec_id : expected_codecs_parsed) { | |
| 567 AudioCodec acodec = StringToAudioCodec(codec_id); | |
| 568 if (acodec != kUnknownAudioCodec) { | |
| 569 expected_acodecs.push_back(acodec); | |
| 570 continue; | |
| 571 } | |
| 572 VideoCodec vcodec = StringToVideoCodec(codec_id); | |
| 573 if (vcodec != kUnknownVideoCodec) { | |
| 574 expected_vcodecs.push_back(vcodec); | |
| 575 continue; | |
| 576 } | |
| 577 MEDIA_LOG(INFO, media_log_) << "Unrecognized media codec: " << codec_id; | |
| 578 } | |
| 579 | |
| 505 for (const auto& track : tracks->tracks()) { | 580 for (const auto& track : tracks->tracks()) { |
| 506 const auto& track_id = track->bytestream_track_id(); | 581 const auto& track_id = track->bytestream_track_id(); |
| 507 | 582 |
| 508 if (track->type() == MediaTrack::Audio) { | 583 if (track->type() == MediaTrack::Audio) { |
| 509 if (audio_track) { | 584 AudioDecoderConfig audio_config = tracks->getAudioConfig(track_id); |
| 510 MEDIA_LOG(ERROR, media_log_) | 585 DVLOG(1) << "Audio track_id=" << track_id |
| 511 << "Error: more than one audio track is currently not supported."; | 586 << " config: " << audio_config.AsHumanReadableString(); |
| 587 DCHECK(audio_config.IsValidConfig()); | |
| 588 | |
| 589 const auto& it = std::find(expected_acodecs.begin(), | |
| 590 expected_acodecs.end(), audio_config.codec()); | |
| 591 if (it == expected_acodecs.end()) { | |
| 592 MEDIA_LOG(ERROR, media_log_) << "Audio stream codec " | |
| 593 << GetCodecName(audio_config.codec()) | |
| 594 << " doesn't match SourceBuffer codecs."; | |
| 512 return false; | 595 return false; |
| 513 } | 596 } |
| 514 audio_track = track.get(); | 597 expected_acodecs.erase(it); |
|
wolenetz
2016/09/13 21:03:13
This is a problem for the following test I suggest
servolk
2016/09/14 18:15:26
Wait, shouldn't the mime type be 'audio/webm; code
wolenetz
2016/09/14 23:31:21
I've filed spec bug (MSE vNext) https://github.com
servolk
2016/09/15 00:18:32
Done.
| |
| 515 audio_config = tracks->getAudioConfig(track_id); | 598 |
| 516 DCHECK(audio_config.IsValidConfig()); | 599 ChunkDemuxerStream* stream = nullptr; |
| 600 if (!first_init_segment_received_) { | |
| 601 DCHECK(audio_streams_.find(track_id) == audio_streams_.end()); | |
| 602 stream = create_demuxer_stream_cb_.Run(DemuxerStream::AUDIO); | |
| 603 if (!stream || !frame_processor_->AddTrack(track_id, stream)) { | |
| 604 MEDIA_LOG(ERROR, media_log_) << "Failed to create audio stream."; | |
| 605 return false; | |
| 606 } | |
| 607 audio_streams_[track_id] = stream; | |
| 608 media_log_->SetBooleanProperty("found_audio_stream", true); | |
| 609 media_log_->SetStringProperty("audio_codec_name", | |
| 610 GetCodecName(audio_config.codec())); | |
| 611 } else { | |
| 612 if (audio_streams_.size() > 1) { | |
| 613 stream = audio_streams_[track_id]; | |
| 614 } else { | |
| 615 // If there is only one audio track then bytestream id might change in | |
|
wolenetz
2016/09/13 21:03:14
nit:s/video/audio/
servolk
2016/09/14 18:15:26
Done.
| |
| 616 // a new init segment. So update our state and notify frame processor. | |
| 617 const auto& it = audio_streams_.begin(); | |
| 618 if (it != audio_streams_.end()) { | |
| 619 stream = it->second; | |
| 620 if (it->first != track_id) { | |
| 621 frame_processor_->UpdateTrack(it->first, track_id); | |
| 622 audio_streams_[track_id] = stream; | |
| 623 audio_streams_.erase(it->first); | |
| 624 } | |
| 625 } | |
| 626 } | |
| 627 if (!stream) { | |
| 628 MEDIA_LOG(ERROR, media_log_) << "Got unexpected audio track" | |
| 629 << " track_id=" << track_id; | |
| 630 return false; | |
| 631 } | |
| 632 } | |
| 633 | |
| 634 track->set_id(stream->media_track_id()); | |
| 635 frame_processor_->OnPossibleAudioConfigUpdate(audio_config); | |
| 636 success &= stream->UpdateAudioConfig(audio_config, media_log_); | |
| 517 } else if (track->type() == MediaTrack::Video) { | 637 } else if (track->type() == MediaTrack::Video) { |
| 518 if (video_track) { | 638 VideoDecoderConfig video_config = tracks->getVideoConfig(track_id); |
| 519 MEDIA_LOG(ERROR, media_log_) | 639 DVLOG(1) << "Video track_id=" << track_id |
| 520 << "Error: more than one video track is currently not supported."; | 640 << " config: " << video_config.AsHumanReadableString(); |
| 641 DCHECK(video_config.IsValidConfig()); | |
| 642 | |
| 643 const auto& it = std::find(expected_vcodecs.begin(), | |
| 644 expected_vcodecs.end(), video_config.codec()); | |
| 645 if (it == expected_vcodecs.end()) { | |
| 646 MEDIA_LOG(ERROR, media_log_) << "Video stream codec " | |
| 647 << GetCodecName(video_config.codec()) | |
| 648 << " doesn't match SourceBuffer codecs."; | |
| 521 return false; | 649 return false; |
| 522 } | 650 } |
| 523 video_track = track.get(); | 651 expected_vcodecs.erase(it); |
|
wolenetz
2016/09/13 21:03:13
ditto missing test and problem for multiple video
servolk
2016/09/14 18:15:26
Acknowledged.
| |
| 524 video_config = tracks->getVideoConfig(track_id); | 652 |
| 525 DCHECK(video_config.IsValidConfig()); | 653 ChunkDemuxerStream* stream = nullptr; |
| 654 if (!first_init_segment_received_) { | |
| 655 DCHECK(video_streams_.find(track_id) == video_streams_.end()); | |
| 656 stream = create_demuxer_stream_cb_.Run(DemuxerStream::VIDEO); | |
| 657 if (!stream || !frame_processor_->AddTrack(track_id, stream)) { | |
| 658 MEDIA_LOG(ERROR, media_log_) << "Failed to create video stream."; | |
| 659 return false; | |
| 660 } | |
| 661 video_streams_[track_id] = stream; | |
| 662 media_log_->SetBooleanProperty("found_video_stream", true); | |
| 663 media_log_->SetStringProperty("video_codec_name", | |
| 664 GetCodecName(video_config.codec())); | |
| 665 } else { | |
| 666 if (video_streams_.size() > 1) { | |
| 667 stream = video_streams_[track_id]; | |
| 668 } else { | |
| 669 // If there is only one video track then bytestream id might change in | |
| 670 // a new init segment. So update our state and notify frame processor. | |
| 671 const auto& it = video_streams_.begin(); | |
| 672 if (it != video_streams_.end()) { | |
| 673 stream = it->second; | |
| 674 if (it->first != track_id) { | |
| 675 frame_processor_->UpdateTrack(it->first, track_id); | |
| 676 video_streams_[track_id] = stream; | |
| 677 video_streams_.erase(it->first); | |
| 678 } | |
| 679 } | |
| 680 } | |
| 681 if (!stream) { | |
| 682 MEDIA_LOG(ERROR, media_log_) << "Got unexpected video track" | |
| 683 << " track_id=" << track_id; | |
| 684 return false; | |
| 685 } | |
| 686 } | |
| 687 | |
| 688 track->set_id(stream->media_track_id()); | |
| 689 success &= stream->UpdateVideoConfig(video_config, media_log_); | |
| 526 } else { | 690 } else { |
| 527 MEDIA_LOG(ERROR, media_log_) << "Error: unsupported media track type " | 691 MEDIA_LOG(ERROR, media_log_) << "Error: unsupported media track type " |
| 528 << track->type(); | 692 << track->type(); |
| 529 return false; | 693 return false; |
| 530 } | 694 } |
| 531 } | 695 } |
| 532 | 696 |
| 533 DVLOG(1) << "OnNewConfigs(" << allow_audio << ", " << allow_video << ", " | 697 if (!expected_acodecs.empty() || !expected_vcodecs.empty()) { |
| 534 << audio_config.IsValidConfig() << ", " | 698 for (const auto& acodec : expected_acodecs) { |
| 535 << video_config.IsValidConfig() << ")"; | 699 MEDIA_LOG(ERROR, media_log_) << "Initialization segment misses expected " |
| 536 // MSE spec allows new configs to be emitted only during Append, but not | 700 << GetCodecName(acodec) << " track."; |
| 537 // during Flush or parser reset operations. | 701 } |
| 538 CHECK(append_in_progress_); | 702 for (const auto& vcodec : expected_vcodecs) { |
| 539 | 703 MEDIA_LOG(ERROR, media_log_) << "Initialization segment misses expected " |
| 540 if (!audio_config.IsValidConfig() && !video_config.IsValidConfig()) { | 704 << GetCodecName(vcodec) << " track."; |
| 541 DVLOG(1) << "OnNewConfigs() : Audio & video config are not valid!"; | 705 } |
| 542 return false; | 706 return false; |
| 543 } | 707 } |
| 544 | 708 |
| 545 // Signal an error if we get configuration info for stream types that weren't | |
| 546 // specified in AddId() or more configs after a stream is initialized. | |
| 547 if (allow_audio != audio_config.IsValidConfig()) { | |
| 548 MEDIA_LOG(ERROR, media_log_) | |
| 549 << "Initialization segment" | |
| 550 << (audio_config.IsValidConfig() ? " has" : " does not have") | |
| 551 << " an audio track, but the mimetype" | |
| 552 << (allow_audio ? " specifies" : " does not specify") | |
| 553 << " an audio codec."; | |
| 554 return false; | |
| 555 } | |
| 556 | |
| 557 if (allow_video != video_config.IsValidConfig()) { | |
| 558 MEDIA_LOG(ERROR, media_log_) | |
| 559 << "Initialization segment" | |
| 560 << (video_config.IsValidConfig() ? " has" : " does not have") | |
| 561 << " a video track, but the mimetype" | |
| 562 << (allow_video ? " specifies" : " does not specify") | |
| 563 << " a video codec."; | |
| 564 return false; | |
| 565 } | |
| 566 | |
| 567 bool success = true; | |
| 568 if (audio_config.IsValidConfig()) { | |
| 569 if (!audio_) { | |
| 570 media_log_->SetBooleanProperty("found_audio_stream", true); | |
| 571 } | |
| 572 if (!audio_ || | |
| 573 audio_->audio_decoder_config().codec() != audio_config.codec()) { | |
| 574 media_log_->SetStringProperty("audio_codec_name", | |
| 575 GetCodecName(audio_config.codec())); | |
| 576 } | |
| 577 | |
| 578 bool audio_stream_just_created = false; | |
| 579 if (!audio_) { | |
| 580 audio_ = create_demuxer_stream_cb_.Run(DemuxerStream::AUDIO); | |
| 581 | |
| 582 if (!audio_) { | |
| 583 DVLOG(1) << "Failed to create an audio stream."; | |
| 584 return false; | |
| 585 } | |
| 586 audio_stream_just_created = true; | |
| 587 | |
| 588 if (!frame_processor_->AddTrack(FrameProcessor::kAudioTrackId, audio_)) { | |
| 589 DVLOG(1) << "Failed to add audio track to frame processor."; | |
| 590 return false; | |
| 591 } | |
| 592 } | |
| 593 | |
| 594 frame_processor_->OnPossibleAudioConfigUpdate(audio_config); | |
| 595 success &= audio_->UpdateAudioConfig(audio_config, media_log_); | |
| 596 | |
| 597 if (audio_stream_just_created) { | |
| 598 std::string audio_buf_limit_switch = | |
| 599 base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( | |
| 600 switches::kMSEAudioBufferSizeLimit); | |
| 601 unsigned audio_buf_size_limit = 0; | |
| 602 if (base::StringToUint(audio_buf_limit_switch, &audio_buf_size_limit) && | |
| 603 audio_buf_size_limit > 0) { | |
| 604 MEDIA_LOG(INFO, media_log_) << "Custom audio SourceBuffer size limit=" | |
| 605 << audio_buf_size_limit; | |
| 606 audio_->SetStreamMemoryLimit(audio_buf_size_limit); | |
| 607 } | |
| 608 } | |
| 609 } | |
| 610 | |
| 611 if (video_config.IsValidConfig()) { | |
| 612 if (!video_) { | |
| 613 media_log_->SetBooleanProperty("found_video_stream", true); | |
| 614 } | |
| 615 if (!video_ || | |
| 616 video_->video_decoder_config().codec() != video_config.codec()) { | |
| 617 media_log_->SetStringProperty("video_codec_name", | |
| 618 GetCodecName(video_config.codec())); | |
| 619 } | |
| 620 | |
| 621 bool video_stream_just_created = false; | |
| 622 if (!video_) { | |
| 623 video_ = create_demuxer_stream_cb_.Run(DemuxerStream::VIDEO); | |
| 624 | |
| 625 if (!video_) { | |
| 626 DVLOG(1) << "Failed to create a video stream."; | |
| 627 return false; | |
| 628 } | |
| 629 video_stream_just_created = true; | |
| 630 | |
| 631 if (!frame_processor_->AddTrack(FrameProcessor::kVideoTrackId, video_)) { | |
| 632 DVLOG(1) << "Failed to add video track to frame processor."; | |
| 633 return false; | |
| 634 } | |
| 635 } | |
| 636 | |
| 637 success &= video_->UpdateVideoConfig(video_config, media_log_); | |
| 638 | |
| 639 if (video_stream_just_created) { | |
| 640 std::string video_buf_limit_switch = | |
| 641 base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( | |
| 642 switches::kMSEVideoBufferSizeLimit); | |
| 643 unsigned video_buf_size_limit = 0; | |
| 644 if (base::StringToUint(video_buf_limit_switch, &video_buf_size_limit) && | |
| 645 video_buf_size_limit > 0) { | |
| 646 MEDIA_LOG(INFO, media_log_) << "Custom video SourceBuffer size limit=" | |
| 647 << video_buf_size_limit; | |
| 648 video_->SetStreamMemoryLimit(video_buf_size_limit); | |
| 649 } | |
| 650 } | |
| 651 } | |
| 652 | |
| 653 typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr; | 709 typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr; |
| 654 if (text_stream_map_.empty()) { | 710 if (text_stream_map_.empty()) { |
| 655 for (TextConfigItr itr = text_configs.begin(); itr != text_configs.end(); | 711 for (TextConfigItr itr = text_configs.begin(); itr != text_configs.end(); |
| 656 ++itr) { | 712 ++itr) { |
| 657 ChunkDemuxerStream* const text_stream = | 713 ChunkDemuxerStream* const text_stream = |
| 658 create_demuxer_stream_cb_.Run(DemuxerStream::TEXT); | 714 create_demuxer_stream_cb_.Run(DemuxerStream::TEXT); |
| 659 if (!frame_processor_->AddTrack(itr->first, text_stream)) { | 715 if (!frame_processor_->AddTrack(itr->first, text_stream)) { |
| 660 success &= false; | 716 success &= false; |
| 661 MEDIA_LOG(ERROR, media_log_) << "Failed to add text track ID " | 717 MEDIA_LOG(ERROR, media_log_) << "Failed to add text track ID " |
| 662 << itr->first << " to frame processor."; | 718 << itr->first << " to frame processor."; |
| (...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 718 success &= false; | 774 success &= false; |
| 719 MEDIA_LOG(ERROR, media_log_) << "New text track config for track ID " | 775 MEDIA_LOG(ERROR, media_log_) << "New text track config for track ID " |
| 720 << config_itr->first | 776 << config_itr->first |
| 721 << " does not match old one."; | 777 << " does not match old one."; |
| 722 break; | 778 break; |
| 723 } | 779 } |
| 724 } | 780 } |
| 725 } | 781 } |
| 726 } | 782 } |
| 727 | 783 |
| 784 if (audio_streams_.empty() && video_streams_.empty()) { | |
| 785 DVLOG(1) << __func__ << ": couldn't find a valid audio or video stream"; | |
| 786 return false; | |
| 787 } | |
| 788 | |
| 728 frame_processor_->SetAllTrackBuffersNeedRandomAccessPoint(); | 789 frame_processor_->SetAllTrackBuffersNeedRandomAccessPoint(); |
| 729 | 790 |
| 730 if (audio_track) { | 791 if (!first_init_segment_received_) { |
| 731 DCHECK(audio_); | 792 first_init_segment_received_ = true; |
| 732 audio_track->set_id(audio_->media_track_id()); | 793 SetStreamMemoryLimits(); |
| 733 } | |
| 734 if (video_track) { | |
| 735 DCHECK(video_); | |
| 736 video_track->set_id(video_->media_track_id()); | |
| 737 } | 794 } |
| 738 | 795 |
| 739 DVLOG(1) << "OnNewConfigs() : " << (success ? "success" : "failed"); | 796 DVLOG(1) << "OnNewConfigs() : " << (success ? "success" : "failed"); |
| 740 if (success) { | 797 if (success) { |
| 741 if (state_ == PENDING_PARSER_CONFIG) | 798 if (state_ == PENDING_PARSER_CONFIG) |
| 742 state_ = PENDING_PARSER_INIT; | 799 state_ = PENDING_PARSER_INIT; |
| 743 DCHECK(!init_segment_received_cb_.is_null()); | 800 DCHECK(!init_segment_received_cb_.is_null()); |
| 744 init_segment_received_cb_.Run(std::move(tracks)); | 801 init_segment_received_cb_.Run(std::move(tracks)); |
| 745 } | 802 } |
| 746 | 803 |
| 747 return success; | 804 return success; |
| 748 } | 805 } |
| 749 | 806 |
| 807 void MediaSourceState::SetStreamMemoryLimits() { | |
| 808 auto cmd_line = base::CommandLine::ForCurrentProcess(); | |
| 809 | |
| 810 std::string audio_buf_limit_switch = | |
| 811 cmd_line->GetSwitchValueASCII(switches::kMSEAudioBufferSizeLimit); | |
| 812 unsigned audio_buf_size_limit = 0; | |
| 813 if (base::StringToUint(audio_buf_limit_switch, &audio_buf_size_limit) && | |
| 814 audio_buf_size_limit > 0) { | |
| 815 MEDIA_LOG(INFO, media_log_) << "Custom audio SourceBuffer size limit=" | |
|
wolenetz
2016/09/13 21:03:14
nit: audio *per-track* SourceBuffer size limit=...
servolk
2016/09/14 18:15:26
Done.
| |
| 816 << audio_buf_size_limit; | |
| 817 for (const auto& it : audio_streams_) { | |
| 818 it.second->SetStreamMemoryLimit(audio_buf_size_limit); | |
| 819 } | |
| 820 } | |
| 821 | |
| 822 std::string video_buf_limit_switch = | |
| 823 cmd_line->GetSwitchValueASCII(switches::kMSEVideoBufferSizeLimit); | |
| 824 unsigned video_buf_size_limit = 0; | |
| 825 if (base::StringToUint(video_buf_limit_switch, &video_buf_size_limit) && | |
| 826 video_buf_size_limit > 0) { | |
| 827 MEDIA_LOG(INFO, media_log_) << "Custom video SourceBuffer size limit=" | |
|
wolenetz
2016/09/13 21:03:14
nit ditto
servolk
2016/09/14 18:15:26
Done.
| |
| 828 << video_buf_size_limit; | |
| 829 for (const auto& it : video_streams_) { | |
| 830 it.second->SetStreamMemoryLimit(video_buf_size_limit); | |
| 831 } | |
| 832 } | |
| 833 } | |
| 834 | |
void MediaSourceState::OnNewMediaSegment() {
  DVLOG(2) << "OnNewMediaSegment()";
  // Media segments may only arrive while the parser is fully initialized.
  DCHECK_EQ(state_, PARSER_INITIALIZED);
  parsing_media_segment_ = true;
  // Forget which tracks had coded frames in the previous segment;
  // OnNewBuffers() repopulates this map as frames for each track arrive.
  media_segment_has_data_for_track_.clear();
}
| 757 | 841 |
| 758 void MediaSourceState::OnEndOfMediaSegment() { | 842 void MediaSourceState::OnEndOfMediaSegment() { |
| 759 DVLOG(2) << "OnEndOfMediaSegment()"; | 843 DVLOG(2) << "OnEndOfMediaSegment()"; |
| 760 DCHECK_EQ(state_, PARSER_INITIALIZED); | 844 DCHECK_EQ(state_, PARSER_INITIALIZED); |
| 761 parsing_media_segment_ = false; | 845 parsing_media_segment_ = false; |
| 762 | 846 |
| 763 const bool missing_audio = audio_ && !media_segment_contained_audio_frame_; | 847 for (const auto& it : audio_streams_) { |
| 764 const bool missing_video = video_ && !media_segment_contained_video_frame_; | 848 if (!media_segment_has_data_for_track_[it.first]) { |
| 765 if (!missing_audio && !missing_video) | 849 LIMITED_MEDIA_LOG(DEBUG, media_log_, num_missing_track_logs_, |
| 766 return; | 850 kMaxMissingTrackInSegmentLogs) |
| 767 | 851 << "Media segment did not contain any coded frames for track " |
| 768 LIMITED_MEDIA_LOG(DEBUG, media_log_, num_missing_track_logs_, | 852 << it.first << ", mismatching initialization segment. Therefore, MSE" |
| 769 kMaxMissingTrackInSegmentLogs) | 853 " coded frame processing may not interoperably detect" |
| 770 << "Media segment did not contain any " | 854 " discontinuities in appended media."; |
| 771 << (missing_audio && missing_video ? "audio or video" | 855 } |
| 772 : missing_audio ? "audio" : "video") | 856 } |
| 773 << " coded frames, mismatching initialization segment. Therefore, MSE " | 857 for (const auto& it : video_streams_) { |
| 774 "coded frame processing may not interoperably detect discontinuities " | 858 if (!media_segment_has_data_for_track_[it.first]) { |
| 775 "in appended media."; | 859 LIMITED_MEDIA_LOG(DEBUG, media_log_, num_missing_track_logs_, |
| 860 kMaxMissingTrackInSegmentLogs) | |
| 861 << "Media segment did not contain any coded frames for track " | |
| 862 << it.first << ", mismatching initialization segment. Therefore, MSE" | |
| 863 " coded frame processing may not interoperably detect" | |
| 864 " discontinuities in appended media."; | |
| 865 } | |
| 866 } | |
| 776 } | 867 } |
| 777 | 868 |
| 778 bool MediaSourceState::OnNewBuffers( | 869 bool MediaSourceState::OnNewBuffers( |
| 779 const StreamParser::BufferQueueMap& buffer_queue_map) { | 870 const StreamParser::BufferQueueMap& buffer_queue_map) { |
| 780 DVLOG(2) << "OnNewBuffers()"; | 871 DVLOG(2) << __func__ << " buffer_queues=" << buffer_queue_map.size(); |
| 781 DCHECK_EQ(state_, PARSER_INITIALIZED); | 872 DCHECK_EQ(state_, PARSER_INITIALIZED); |
| 782 DCHECK(timestamp_offset_during_append_); | 873 DCHECK(timestamp_offset_during_append_); |
| 783 DCHECK(parsing_media_segment_); | 874 DCHECK(parsing_media_segment_); |
| 784 | 875 |
| 785 for (const auto& it : buffer_queue_map) { | 876 for (const auto& it : buffer_queue_map) { |
| 786 const StreamParser::BufferQueue& bufq = it.second; | 877 const StreamParser::BufferQueue& bufq = it.second; |
| 787 DCHECK(!bufq.empty()); | 878 DCHECK(!bufq.empty()); |
| 788 if (bufq[0]->type() == DemuxerStream::AUDIO) { | 879 media_segment_has_data_for_track_[it.first] = true; |
| 789 media_segment_contained_audio_frame_ = true; | |
| 790 } else if (bufq[0]->type() == DemuxerStream::VIDEO) { | |
| 791 media_segment_contained_video_frame_ = true; | |
| 792 } | |
| 793 } | 880 } |
| 794 | 881 |
| 795 const TimeDelta timestamp_offset_before_processing = | 882 const TimeDelta timestamp_offset_before_processing = |
| 796 *timestamp_offset_during_append_; | 883 *timestamp_offset_during_append_; |
| 797 | 884 |
| 798 // Calculate the new timestamp offset for audio/video tracks if the stream | 885 // Calculate the new timestamp offset for audio/video tracks if the stream |
| 799 // parser has requested automatic updates. | 886 // parser has requested automatic updates. |
| 800 TimeDelta new_timestamp_offset = timestamp_offset_before_processing; | 887 TimeDelta new_timestamp_offset = timestamp_offset_before_processing; |
| 801 if (auto_update_timestamp_offset_) { | 888 if (auto_update_timestamp_offset_) { |
| 802 TimeDelta min_end_timestamp = kNoTimestamp; | 889 TimeDelta min_end_timestamp = kNoTimestamp; |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 820 } | 907 } |
| 821 | 908 |
| 822 // Only update the timestamp offset if the frame processor hasn't already. | 909 // Only update the timestamp offset if the frame processor hasn't already. |
| 823 if (auto_update_timestamp_offset_ && | 910 if (auto_update_timestamp_offset_ && |
| 824 timestamp_offset_before_processing == *timestamp_offset_during_append_) { | 911 timestamp_offset_before_processing == *timestamp_offset_during_append_) { |
| 825 *timestamp_offset_during_append_ = new_timestamp_offset; | 912 *timestamp_offset_during_append_ = new_timestamp_offset; |
| 826 } | 913 } |
| 827 | 914 |
| 828 return true; | 915 return true; |
| 829 } | 916 } |
| 830 | |
// Invoked by the stream parser once it has fully initialized. Transitions
// this MediaSourceState from PENDING_PARSER_INIT to PARSER_INITIALIZED,
// records whether the parser requested automatic timestamp offset updates,
// and fires the one-shot init callback with the parser's init parameters.
void MediaSourceState::OnSourceInitDone(
    const StreamParser::InitParameters& params) {
  DCHECK_EQ(state_, PENDING_PARSER_INIT);
  state_ = PARSER_INITIALIZED;
  auto_update_timestamp_offset_ = params.auto_update_timestamp_offset;
  // ResetAndReturn clears |init_cb_| before running it, so the callback
  // cannot be invoked a second time.
  base::ResetAndReturn(&init_cb_).Run(params);
}
| 838 | 924 |
| 839 } // namespace media | 925 } // namespace media |
| OLD | NEW |