Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/media_source_state.h" | 5 #include "media/filters/media_source_state.h" |
| 6 | 6 |
| 7 #include <set> | |
| 8 | |
| 7 #include "base/callback_helpers.h" | 9 #include "base/callback_helpers.h" |
| 8 #include "base/command_line.h" | 10 #include "base/command_line.h" |
| 9 #include "base/stl_util.h" | 11 #include "base/stl_util.h" |
| 10 #include "base/strings/string_number_conversions.h" | 12 #include "base/strings/string_number_conversions.h" |
| 11 #include "media/base/media_switches.h" | 13 #include "media/base/media_switches.h" |
| 12 #include "media/base/media_track.h" | 14 #include "media/base/media_track.h" |
| 13 #include "media/base/media_tracks.h" | 15 #include "media/base/media_tracks.h" |
| 16 #include "media/base/mime_util.h" | |
| 14 #include "media/filters/chunk_demuxer.h" | 17 #include "media/filters/chunk_demuxer.h" |
| 15 #include "media/filters/frame_processor.h" | 18 #include "media/filters/frame_processor.h" |
| 16 #include "media/filters/source_buffer_stream.h" | 19 #include "media/filters/source_buffer_stream.h" |
| 17 | 20 |
| 18 namespace media { | 21 namespace media { |
| 19 | 22 |
| 20 enum { | 23 enum { |
| 21 // Limits the number of MEDIA_LOG() calls warning the user that a muxed stream | 24 // Limits the number of MEDIA_LOG() calls warning the user that a muxed stream |
| 22 // media segment is missing a block from at least one of the audio or video | 25 // media segment is missing a block from at least one of the audio or video |
| 23 // tracks. | 26 // tracks. |
| 24 kMaxMissingTrackInSegmentLogs = 10, | 27 kMaxMissingTrackInSegmentLogs = 10, |
| 25 }; | 28 }; |
| 26 | 29 |
| 27 static TimeDelta EndTimestamp(const StreamParser::BufferQueue& queue) { | 30 namespace { |
| 31 | |
| 32 TimeDelta EndTimestamp(const StreamParser::BufferQueue& queue) { | |
| 28 return queue.back()->timestamp() + queue.back()->duration(); | 33 return queue.back()->timestamp() + queue.back()->duration(); |
| 29 } | 34 } |
| 30 | 35 |
| 36 // Check the input |tracks| and |text_configs| and return false if | |
| 37 // duplicate track ids are detected. | |
| 38 bool CheckBytestreamTrackIds( | |
| 39 const MediaTracks& tracks, | |
| 40 const StreamParser::TextTrackConfigMap& text_configs) { | |
| 41 std::set<StreamParser::TrackId> bytestream_ids; | |
| 42 for (const auto& track : tracks.tracks()) { | |
| 43 const StreamParser::TrackId& track_id = track->bytestream_track_id(); | |
| 44 if (bytestream_ids.find(track_id) != bytestream_ids.end()) { | |
| 45 return false; | |
| 46 } | |
| 47 bytestream_ids.insert(track_id); | |
| 48 } | |
| 49 for (const auto& text_track : text_configs) { | |
| 50 const StreamParser::TrackId& track_id = text_track.first; | |
| 51 if (bytestream_ids.find(track_id) != bytestream_ids.end()) { | |
| 52 return false; | |
| 53 } | |
| 54 bytestream_ids.insert(track_id); | |
| 55 } | |
| 56 return true; | |
| 57 } | |
| 58 | |
| 59 } // namespace | |
| 60 | |
| 31 // List of time ranges for each SourceBuffer. | 61 // List of time ranges for each SourceBuffer. |
| 32 // static | 62 // static |
| 33 Ranges<TimeDelta> MediaSourceState::ComputeRangesIntersection( | 63 Ranges<TimeDelta> MediaSourceState::ComputeRangesIntersection( |
| 34 const RangesList& active_ranges, | 64 const RangesList& active_ranges, |
| 35 bool ended) { | 65 bool ended) { |
| 36 // TODO(servolk): Perhaps this can be removed in favor of blink implementation | 66 // TODO(servolk): Perhaps this can be removed in favor of blink implementation |
| 37 // (MediaSource::buffered)? Currently this is only used on Android and for | 67 // (MediaSource::buffered)? Currently this is only used on Android and for |
| 38 // updating DemuxerHost's buffered ranges during AppendData() as well as | 68 // updating DemuxerHost's buffered ranges during AppendData() as well as |
| 39 // SourceBuffer.buffered property implementation. | 69 // SourceBuffer.buffered property implementation. |
| 40 // Implementation of HTMLMediaElement.buffered algorithm in MSE spec. | 70 // Implementation of HTMLMediaElement.buffered algorithm in MSE spec. |
| (...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 87 } | 117 } |
| 88 | 118 |
| 89 MediaSourceState::MediaSourceState( | 119 MediaSourceState::MediaSourceState( |
| 90 std::unique_ptr<StreamParser> stream_parser, | 120 std::unique_ptr<StreamParser> stream_parser, |
| 91 std::unique_ptr<FrameProcessor> frame_processor, | 121 std::unique_ptr<FrameProcessor> frame_processor, |
| 92 const CreateDemuxerStreamCB& create_demuxer_stream_cb, | 122 const CreateDemuxerStreamCB& create_demuxer_stream_cb, |
| 93 const scoped_refptr<MediaLog>& media_log) | 123 const scoped_refptr<MediaLog>& media_log) |
| 94 : create_demuxer_stream_cb_(create_demuxer_stream_cb), | 124 : create_demuxer_stream_cb_(create_demuxer_stream_cb), |
| 95 timestamp_offset_during_append_(NULL), | 125 timestamp_offset_during_append_(NULL), |
| 96 parsing_media_segment_(false), | 126 parsing_media_segment_(false), |
| 97 media_segment_contained_audio_frame_(false), | |
| 98 media_segment_contained_video_frame_(false), | |
| 99 stream_parser_(stream_parser.release()), | 127 stream_parser_(stream_parser.release()), |
| 100 audio_(NULL), | |
| 101 video_(NULL), | |
| 102 frame_processor_(frame_processor.release()), | 128 frame_processor_(frame_processor.release()), |
| 103 media_log_(media_log), | 129 media_log_(media_log), |
| 104 state_(UNINITIALIZED), | 130 state_(UNINITIALIZED), |
| 105 auto_update_timestamp_offset_(false) { | 131 auto_update_timestamp_offset_(false) { |
| 106 DCHECK(!create_demuxer_stream_cb_.is_null()); | 132 DCHECK(!create_demuxer_stream_cb_.is_null()); |
| 107 DCHECK(frame_processor_); | 133 DCHECK(frame_processor_); |
| 108 } | 134 } |
| 109 | 135 |
| 110 MediaSourceState::~MediaSourceState() { | 136 MediaSourceState::~MediaSourceState() { |
| 111 Shutdown(); | 137 Shutdown(); |
| 112 | 138 |
| 113 base::STLDeleteValues(&text_stream_map_); | 139 base::STLDeleteValues(&text_stream_map_); |
| 114 } | 140 } |
| 115 | 141 |
| 116 void MediaSourceState::Init( | 142 void MediaSourceState::Init( |
| 117 const StreamParser::InitCB& init_cb, | 143 const StreamParser::InitCB& init_cb, |
| 118 bool allow_audio, | 144 const std::string& expected_codecs, |
| 119 bool allow_video, | |
| 120 const StreamParser::EncryptedMediaInitDataCB& encrypted_media_init_data_cb, | 145 const StreamParser::EncryptedMediaInitDataCB& encrypted_media_init_data_cb, |
| 121 const NewTextTrackCB& new_text_track_cb) { | 146 const NewTextTrackCB& new_text_track_cb) { |
| 122 DCHECK_EQ(state_, UNINITIALIZED); | 147 DCHECK_EQ(state_, UNINITIALIZED); |
| 123 new_text_track_cb_ = new_text_track_cb; | 148 new_text_track_cb_ = new_text_track_cb; |
| 124 init_cb_ = init_cb; | 149 init_cb_ = init_cb; |
| 125 | 150 |
| 151 std::vector<std::string> expected_codecs_parsed; | |
| 152 ParseCodecString(expected_codecs, &expected_codecs_parsed, false); | |
| 153 | |
| 154 std::vector<AudioCodec> expected_acodecs; | |
| 155 std::vector<VideoCodec> expected_vcodecs; | |
| 156 for (const auto& codec_id : expected_codecs_parsed) { | |
| 157 AudioCodec acodec = StringToAudioCodec(codec_id); | |
| 158 if (acodec != kUnknownAudioCodec) { | |
| 159 expected_audio_codecs_.push_back(acodec); | |
| 160 continue; | |
| 161 } | |
| 162 VideoCodec vcodec = StringToVideoCodec(codec_id); | |
| 163 if (vcodec != kUnknownVideoCodec) { | |
| 164 expected_video_codecs_.push_back(vcodec); | |
| 165 continue; | |
| 166 } | |
| 167 MEDIA_LOG(INFO, media_log_) << "Unrecognized media codec: " << codec_id; | |
| 168 } | |
| 169 | |
| 126 state_ = PENDING_PARSER_CONFIG; | 170 state_ = PENDING_PARSER_CONFIG; |
| 127 stream_parser_->Init( | 171 stream_parser_->Init( |
| 128 base::Bind(&MediaSourceState::OnSourceInitDone, base::Unretained(this)), | 172 base::Bind(&MediaSourceState::OnSourceInitDone, base::Unretained(this)), |
| 129 base::Bind(&MediaSourceState::OnNewConfigs, base::Unretained(this), | 173 base::Bind(&MediaSourceState::OnNewConfigs, base::Unretained(this), |
| 130 allow_audio, allow_video), | 174 expected_codecs), |
| 131 base::Bind(&MediaSourceState::OnNewBuffers, base::Unretained(this)), | 175 base::Bind(&MediaSourceState::OnNewBuffers, base::Unretained(this)), |
| 132 new_text_track_cb_.is_null(), encrypted_media_init_data_cb, | 176 new_text_track_cb_.is_null(), encrypted_media_init_data_cb, |
| 133 base::Bind(&MediaSourceState::OnNewMediaSegment, base::Unretained(this)), | 177 base::Bind(&MediaSourceState::OnNewMediaSegment, base::Unretained(this)), |
| 134 base::Bind(&MediaSourceState::OnEndOfMediaSegment, | 178 base::Bind(&MediaSourceState::OnEndOfMediaSegment, |
| 135 base::Unretained(this)), | 179 base::Unretained(this)), |
| 136 media_log_); | 180 media_log_); |
| 137 } | 181 } |
| 138 | 182 |
| 139 void MediaSourceState::SetSequenceMode(bool sequence_mode) { | 183 void MediaSourceState::SetSequenceMode(bool sequence_mode) { |
| 140 DCHECK(!parsing_media_segment_); | 184 DCHECK(!parsing_media_segment_); |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 189 DCHECK(!timestamp_offset_during_append_); | 233 DCHECK(!timestamp_offset_during_append_); |
| 190 timestamp_offset_during_append_ = timestamp_offset; | 234 timestamp_offset_during_append_ = timestamp_offset; |
| 191 append_window_start_during_append_ = append_window_start; | 235 append_window_start_during_append_ = append_window_start; |
| 192 append_window_end_during_append_ = append_window_end; | 236 append_window_end_during_append_ = append_window_end; |
| 193 | 237 |
| 194 stream_parser_->Flush(); | 238 stream_parser_->Flush(); |
| 195 timestamp_offset_during_append_ = NULL; | 239 timestamp_offset_during_append_ = NULL; |
| 196 | 240 |
| 197 frame_processor_->Reset(); | 241 frame_processor_->Reset(); |
| 198 parsing_media_segment_ = false; | 242 parsing_media_segment_ = false; |
| 199 media_segment_contained_audio_frame_ = false; | 243 media_segment_has_data_for_track_.clear(); |
| 200 media_segment_contained_video_frame_ = false; | |
| 201 } | 244 } |
| 202 | 245 |
| 203 void MediaSourceState::Remove(TimeDelta start, | 246 void MediaSourceState::Remove(TimeDelta start, |
| 204 TimeDelta end, | 247 TimeDelta end, |
| 205 TimeDelta duration) { | 248 TimeDelta duration) { |
| 206 if (audio_) | 249 for (const auto& it : audio_streams_) { |
| 207 audio_->Remove(start, end, duration); | 250 it.second->Remove(start, end, duration); |
| 251 } | |
| 208 | 252 |
| 209 if (video_) | 253 for (const auto& it : video_streams_) { |
| 210 video_->Remove(start, end, duration); | 254 it.second->Remove(start, end, duration); |
| 255 } | |
| 211 | 256 |
| 212 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 257 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 213 itr != text_stream_map_.end(); ++itr) { | 258 itr != text_stream_map_.end(); ++itr) { |
| 214 itr->second->Remove(start, end, duration); | 259 itr->second->Remove(start, end, duration); |
| 215 } | 260 } |
| 216 } | 261 } |
| 217 | 262 |
| 218 size_t MediaSourceState::EstimateVideoDataSize( | |
| 219 size_t muxed_data_chunk_size) const { | |
| 220 DCHECK(audio_); | |
| 221 DCHECK(video_); | |
| 222 | |
| 223 size_t videoBufferedSize = video_->GetBufferedSize(); | |
| 224 size_t audioBufferedSize = audio_->GetBufferedSize(); | |
| 225 if (videoBufferedSize == 0 || audioBufferedSize == 0) { | |
| 226 // At this point either audio or video buffer is empty, which means buffer | |
| 227 // levels are probably low anyway and we should have enough space in the | |
| 228 // buffers for appending new data, so just take a very rough guess. | |
| 229 return muxed_data_chunk_size * 7 / 8; | |
| 230 } | |
| 231 | |
| 232 // We need to estimate how much audio and video data is going to be in the | |
| 233 // newly appended data chunk to make space for the new data. And we need to do | |
| 234 // that without parsing the data (which will happen later, in the Append | |
| 235 // phase). So for now we can only rely on some heuristic here. Let's assume | |
| 236 // that the proportion of the audio/video in the new data chunk is the same as | |
| 237 // the current ratio of buffered audio/video. | |
| 238 // Longer term this should go away once we further change the MSE GC algorithm | |
| 239 // to work across all streams of a SourceBuffer (see crbug.com/520704). | |
| 240 double videoBufferedSizeF = static_cast<double>(videoBufferedSize); | |
| 241 double audioBufferedSizeF = static_cast<double>(audioBufferedSize); | |
| 242 | |
| 243 double totalBufferedSizeF = videoBufferedSizeF + audioBufferedSizeF; | |
| 244 CHECK_GT(totalBufferedSizeF, 0.0); | |
| 245 | |
| 246 double videoRatio = videoBufferedSizeF / totalBufferedSizeF; | |
| 247 CHECK_GE(videoRatio, 0.0); | |
| 248 CHECK_LE(videoRatio, 1.0); | |
| 249 double estimatedVideoSize = muxed_data_chunk_size * videoRatio; | |
| 250 return static_cast<size_t>(estimatedVideoSize); | |
| 251 } | |
| 252 | |
| 253 bool MediaSourceState::EvictCodedFrames(DecodeTimestamp media_time, | 263 bool MediaSourceState::EvictCodedFrames(DecodeTimestamp media_time, |
| 254 size_t newDataSize) { | 264 size_t newDataSize) { |
| 255 bool success = true; | 265 size_t total_buffered_size = 0; |
| 266 for (const auto& it : audio_streams_) | |
| 267 total_buffered_size += it.second->GetBufferedSize(); | |
| 268 for (const auto& it : video_streams_) | |
| 269 total_buffered_size += it.second->GetBufferedSize(); | |
| 270 for (const auto& it : text_stream_map_) | |
| 271 total_buffered_size += it.second->GetBufferedSize(); | |
| 256 | 272 |
| 257 DVLOG(3) << __func__ << " media_time=" << media_time.InSecondsF() | 273 DVLOG(3) << __func__ << " media_time=" << media_time.InSecondsF() |
| 258 << " newDataSize=" << newDataSize | 274 << " newDataSize=" << newDataSize |
| 259 << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0) | 275 << " total_buffered_size=" << total_buffered_size; |
| 260 << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0); | |
| 261 | 276 |
| 262 size_t newAudioSize = 0; | 277 if (total_buffered_size == 0) |
| 263 size_t newVideoSize = 0; | 278 return true; |
| 264 if (audio_ && video_) { | 279 |
| 265 newVideoSize = EstimateVideoDataSize(newDataSize); | 280 bool success = true; |
| 266 newAudioSize = newDataSize - newVideoSize; | 281 for (const auto& it : audio_streams_) { |
| 267 } else if (video_) { | 282 size_t curr_size = it.second->GetBufferedSize(); |
| 268 newVideoSize = newDataSize; | 283 if (curr_size == 0) |
| 269 } else if (audio_) { | 284 continue; |
| 270 newAudioSize = newDataSize; | 285 size_t estimated_new_size = newDataSize * curr_size / total_buffered_size; |
| 286 success &= it.second->EvictCodedFrames(media_time, estimated_new_size); | |
| 287 } | |
| 288 for (const auto& it : video_streams_) { | |
| 289 size_t curr_size = it.second->GetBufferedSize(); | |
| 290 if (curr_size == 0) | |
| 291 continue; | |
| 292 size_t estimated_new_size = newDataSize * curr_size / total_buffered_size; | |
| 293 success &= it.second->EvictCodedFrames(media_time, estimated_new_size); | |
| 294 } | |
| 295 for (const auto& it : text_stream_map_) { | |
| 296 size_t curr_size = it.second->GetBufferedSize(); | |
| 297 if (curr_size == 0) | |
| 298 continue; | |
| 299 size_t estimated_new_size = newDataSize * curr_size / total_buffered_size; | |
| 300 success &= it.second->EvictCodedFrames(media_time, estimated_new_size); | |
| 271 } | 301 } |
| 272 | 302 |
| 273 DVLOG(3) << __func__ | 303 DVLOG(3) << __func__ << " success=" << success; |
| 274 << " estimated audio/video sizes: newVideoSize=" << newVideoSize | |
| 275 << " newAudioSize=" << newAudioSize; | |
| 276 | |
| 277 if (audio_) | |
| 278 success = audio_->EvictCodedFrames(media_time, newAudioSize) && success; | |
| 279 | |
| 280 if (video_) | |
| 281 success = video_->EvictCodedFrames(media_time, newVideoSize) && success; | |
| 282 | |
| 283 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | |
| 284 itr != text_stream_map_.end(); ++itr) { | |
| 285 success = itr->second->EvictCodedFrames(media_time, 0) && success; | |
| 286 } | |
| 287 | |
| 288 DVLOG(3) << __func__ << " result=" << success | |
| 289 << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0) | |
| 290 << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0); | |
| 291 | |
| 292 return success; | 304 return success; |
| 293 } | 305 } |
| 294 | 306 |
| 295 Ranges<TimeDelta> MediaSourceState::GetBufferedRanges(TimeDelta duration, | 307 Ranges<TimeDelta> MediaSourceState::GetBufferedRanges(TimeDelta duration, |
| 296 bool ended) const { | 308 bool ended) const { |
| 297 // TODO(acolwell): When we start allowing disabled tracks we'll need to update | |
| 298 // this code to only add ranges from active tracks. | |
| 299 RangesList ranges_list; | 309 RangesList ranges_list; |
| 300 if (audio_) | 310 for (const auto& it : audio_streams_) |
| 301 ranges_list.push_back(audio_->GetBufferedRanges(duration)); | 311 ranges_list.push_back(it.second->GetBufferedRanges(duration)); |
| 302 | 312 |
| 303 if (video_) | 313 for (const auto& it : video_streams_) |
| 304 ranges_list.push_back(video_->GetBufferedRanges(duration)); | 314 ranges_list.push_back(it.second->GetBufferedRanges(duration)); |
| 305 | 315 |
| 306 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); | 316 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); |
| 307 itr != text_stream_map_.end(); ++itr) { | 317 itr != text_stream_map_.end(); ++itr) { |
| 308 ranges_list.push_back(itr->second->GetBufferedRanges(duration)); | 318 ranges_list.push_back(itr->second->GetBufferedRanges(duration)); |
| 309 } | 319 } |
| 310 | 320 |
| 311 return ComputeRangesIntersection(ranges_list, ended); | 321 return ComputeRangesIntersection(ranges_list, ended); |
| 312 } | 322 } |
| 313 | 323 |
| 314 TimeDelta MediaSourceState::GetHighestPresentationTimestamp() const { | 324 TimeDelta MediaSourceState::GetHighestPresentationTimestamp() const { |
| 315 TimeDelta max_pts; | 325 TimeDelta max_pts; |
| 316 | 326 |
| 317 if (audio_) | 327 for (const auto& it : audio_streams_) { |
| 318 max_pts = std::max(max_pts, audio_->GetHighestPresentationTimestamp()); | 328 max_pts = std::max(max_pts, it.second->GetHighestPresentationTimestamp()); |
| 329 } | |
| 319 | 330 |
| 320 if (video_) | 331 for (const auto& it : video_streams_) { |
| 321 max_pts = std::max(max_pts, video_->GetHighestPresentationTimestamp()); | 332 max_pts = std::max(max_pts, it.second->GetHighestPresentationTimestamp()); |
| 333 } | |
| 322 | 334 |
| 323 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); | 335 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); |
| 324 itr != text_stream_map_.end(); ++itr) { | 336 itr != text_stream_map_.end(); ++itr) { |
| 325 max_pts = std::max(max_pts, itr->second->GetHighestPresentationTimestamp()); | 337 max_pts = std::max(max_pts, itr->second->GetHighestPresentationTimestamp()); |
| 326 } | 338 } |
| 327 | 339 |
| 328 return max_pts; | 340 return max_pts; |
| 329 } | 341 } |
| 330 | 342 |
| 331 TimeDelta MediaSourceState::GetMaxBufferedDuration() const { | 343 TimeDelta MediaSourceState::GetMaxBufferedDuration() const { |
| 332 TimeDelta max_duration; | 344 TimeDelta max_duration; |
| 333 | 345 |
| 334 if (audio_) | 346 for (const auto& it : audio_streams_) { |
| 335 max_duration = std::max(max_duration, audio_->GetBufferedDuration()); | 347 max_duration = std::max(max_duration, it.second->GetBufferedDuration()); |
| 348 } | |
| 336 | 349 |
| 337 if (video_) | 350 for (const auto& it : video_streams_) { |
| 338 max_duration = std::max(max_duration, video_->GetBufferedDuration()); | 351 max_duration = std::max(max_duration, it.second->GetBufferedDuration()); |
| 352 } | |
| 339 | 353 |
| 340 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); | 354 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); |
| 341 itr != text_stream_map_.end(); ++itr) { | 355 itr != text_stream_map_.end(); ++itr) { |
| 342 max_duration = std::max(max_duration, itr->second->GetBufferedDuration()); | 356 max_duration = std::max(max_duration, itr->second->GetBufferedDuration()); |
| 343 } | 357 } |
| 344 | 358 |
| 345 return max_duration; | 359 return max_duration; |
| 346 } | 360 } |
| 347 | 361 |
| 348 void MediaSourceState::StartReturningData() { | 362 void MediaSourceState::StartReturningData() { |
| 349 if (audio_) | 363 for (const auto& it : audio_streams_) { |
| 350 audio_->StartReturningData(); | 364 it.second->StartReturningData(); |
| 365 } | |
| 351 | 366 |
| 352 if (video_) | 367 for (const auto& it : video_streams_) { |
| 353 video_->StartReturningData(); | 368 it.second->StartReturningData(); |
| 369 } | |
| 354 | 370 |
| 355 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 371 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 356 itr != text_stream_map_.end(); ++itr) { | 372 itr != text_stream_map_.end(); ++itr) { |
| 357 itr->second->StartReturningData(); | 373 itr->second->StartReturningData(); |
| 358 } | 374 } |
| 359 } | 375 } |
| 360 | 376 |
| 361 void MediaSourceState::AbortReads() { | 377 void MediaSourceState::AbortReads() { |
| 362 if (audio_) | 378 for (const auto& it : audio_streams_) { |
| 363 audio_->AbortReads(); | 379 it.second->AbortReads(); |
| 380 } | |
| 364 | 381 |
| 365 if (video_) | 382 for (const auto& it : video_streams_) { |
| 366 video_->AbortReads(); | 383 it.second->AbortReads(); |
| 384 } | |
| 367 | 385 |
| 368 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 386 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 369 itr != text_stream_map_.end(); ++itr) { | 387 itr != text_stream_map_.end(); ++itr) { |
| 370 itr->second->AbortReads(); | 388 itr->second->AbortReads(); |
| 371 } | 389 } |
| 372 } | 390 } |
| 373 | 391 |
| 374 void MediaSourceState::Seek(TimeDelta seek_time) { | 392 void MediaSourceState::Seek(TimeDelta seek_time) { |
| 375 if (audio_) | 393 for (const auto& it : audio_streams_) { |
| 376 audio_->Seek(seek_time); | 394 it.second->Seek(seek_time); |
| 395 } | |
| 377 | 396 |
| 378 if (video_) | 397 for (const auto& it : video_streams_) { |
| 379 video_->Seek(seek_time); | 398 it.second->Seek(seek_time); |
| 399 } | |
| 380 | 400 |
| 381 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 401 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 382 itr != text_stream_map_.end(); ++itr) { | 402 itr != text_stream_map_.end(); ++itr) { |
| 383 itr->second->Seek(seek_time); | 403 itr->second->Seek(seek_time); |
| 384 } | 404 } |
| 385 } | 405 } |
| 386 | 406 |
| 387 void MediaSourceState::CompletePendingReadIfPossible() { | 407 void MediaSourceState::CompletePendingReadIfPossible() { |
| 388 if (audio_) | 408 for (const auto& it : audio_streams_) { |
| 389 audio_->CompletePendingReadIfPossible(); | 409 it.second->CompletePendingReadIfPossible(); |
| 410 } | |
| 390 | 411 |
| 391 if (video_) | 412 for (const auto& it : video_streams_) { |
| 392 video_->CompletePendingReadIfPossible(); | 413 it.second->CompletePendingReadIfPossible(); |
| 414 } | |
| 393 | 415 |
| 394 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 416 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 395 itr != text_stream_map_.end(); ++itr) { | 417 itr != text_stream_map_.end(); ++itr) { |
| 396 itr->second->CompletePendingReadIfPossible(); | 418 itr->second->CompletePendingReadIfPossible(); |
| 397 } | 419 } |
| 398 } | 420 } |
| 399 | 421 |
| 400 void MediaSourceState::OnSetDuration(TimeDelta duration) { | 422 void MediaSourceState::OnSetDuration(TimeDelta duration) { |
| 401 if (audio_) | 423 for (const auto& it : audio_streams_) { |
| 402 audio_->OnSetDuration(duration); | 424 it.second->OnSetDuration(duration); |
| 425 } | |
| 403 | 426 |
| 404 if (video_) | 427 for (const auto& it : video_streams_) { |
| 405 video_->OnSetDuration(duration); | 428 it.second->OnSetDuration(duration); |
| 429 } | |
| 406 | 430 |
| 407 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 431 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 408 itr != text_stream_map_.end(); ++itr) { | 432 itr != text_stream_map_.end(); ++itr) { |
| 409 itr->second->OnSetDuration(duration); | 433 itr->second->OnSetDuration(duration); |
| 410 } | 434 } |
| 411 } | 435 } |
| 412 | 436 |
| 413 void MediaSourceState::MarkEndOfStream() { | 437 void MediaSourceState::MarkEndOfStream() { |
| 414 if (audio_) | 438 for (const auto& it : audio_streams_) { |
| 415 audio_->MarkEndOfStream(); | 439 it.second->MarkEndOfStream(); |
| 440 } | |
| 416 | 441 |
| 417 if (video_) | 442 for (const auto& it : video_streams_) { |
| 418 video_->MarkEndOfStream(); | 443 it.second->MarkEndOfStream(); |
| 444 } | |
| 419 | 445 |
| 420 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 446 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 421 itr != text_stream_map_.end(); ++itr) { | 447 itr != text_stream_map_.end(); ++itr) { |
| 422 itr->second->MarkEndOfStream(); | 448 itr->second->MarkEndOfStream(); |
| 423 } | 449 } |
| 424 } | 450 } |
| 425 | 451 |
| 426 void MediaSourceState::UnmarkEndOfStream() { | 452 void MediaSourceState::UnmarkEndOfStream() { |
| 427 if (audio_) | 453 for (const auto& it : audio_streams_) { |
| 428 audio_->UnmarkEndOfStream(); | 454 it.second->UnmarkEndOfStream(); |
| 455 } | |
| 429 | 456 |
| 430 if (video_) | 457 for (const auto& it : video_streams_) { |
| 431 video_->UnmarkEndOfStream(); | 458 it.second->UnmarkEndOfStream(); |
| 459 } | |
| 432 | 460 |
| 433 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 461 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 434 itr != text_stream_map_.end(); ++itr) { | 462 itr != text_stream_map_.end(); ++itr) { |
| 435 itr->second->UnmarkEndOfStream(); | 463 itr->second->UnmarkEndOfStream(); |
| 436 } | 464 } |
| 437 } | 465 } |
| 438 | 466 |
| 439 void MediaSourceState::Shutdown() { | 467 void MediaSourceState::Shutdown() { |
| 440 if (audio_) | 468 for (const auto& it : audio_streams_) { |
| 441 audio_->Shutdown(); | 469 it.second->Shutdown(); |
| 470 } | |
| 442 | 471 |
| 443 if (video_) | 472 for (const auto& it : video_streams_) { |
| 444 video_->Shutdown(); | 473 it.second->Shutdown(); |
| 474 } | |
| 445 | 475 |
| 446 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 476 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 447 itr != text_stream_map_.end(); ++itr) { | 477 itr != text_stream_map_.end(); ++itr) { |
| 448 itr->second->Shutdown(); | 478 itr->second->Shutdown(); |
| 449 } | 479 } |
| 450 } | 480 } |
| 451 | 481 |
| 452 void MediaSourceState::SetMemoryLimits(DemuxerStream::Type type, | 482 void MediaSourceState::SetMemoryLimits(DemuxerStream::Type type, |
| 453 size_t memory_limit) { | 483 size_t memory_limit) { |
| 454 switch (type) { | 484 switch (type) { |
| 455 case DemuxerStream::AUDIO: | 485 case DemuxerStream::AUDIO: |
| 456 if (audio_) | 486 for (const auto& it : audio_streams_) { |
| 457 audio_->SetStreamMemoryLimit(memory_limit); | 487 it.second->SetStreamMemoryLimit(memory_limit); |
| 488 } | |
| 458 break; | 489 break; |
| 459 case DemuxerStream::VIDEO: | 490 case DemuxerStream::VIDEO: |
| 460 if (video_) | 491 for (const auto& it : video_streams_) { |
| 461 video_->SetStreamMemoryLimit(memory_limit); | 492 it.second->SetStreamMemoryLimit(memory_limit); |
| 493 } | |
| 462 break; | 494 break; |
| 463 case DemuxerStream::TEXT: | 495 case DemuxerStream::TEXT: |
| 464 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 496 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
| 465 itr != text_stream_map_.end(); ++itr) { | 497 itr != text_stream_map_.end(); ++itr) { |
| 466 itr->second->SetStreamMemoryLimit(memory_limit); | 498 itr->second->SetStreamMemoryLimit(memory_limit); |
| 467 } | 499 } |
| 468 break; | 500 break; |
| 469 case DemuxerStream::UNKNOWN: | 501 case DemuxerStream::UNKNOWN: |
| 470 case DemuxerStream::NUM_TYPES: | 502 case DemuxerStream::NUM_TYPES: |
| 471 NOTREACHED(); | 503 NOTREACHED(); |
| 472 break; | 504 break; |
| 473 } | 505 } |
| 474 } | 506 } |
| 475 | 507 |
| 476 bool MediaSourceState::IsSeekWaitingForData() const { | 508 bool MediaSourceState::IsSeekWaitingForData() const { |
| 477 if (audio_ && audio_->IsSeekWaitingForData()) | 509 for (const auto& it : audio_streams_) { |
| 478 return true; | 510 if (it.second->IsSeekWaitingForData()) |
| 511 return true; | |
| 512 } | |
| 479 | 513 |
| 480 if (video_ && video_->IsSeekWaitingForData()) | 514 for (const auto& it : video_streams_) { |
| 481 return true; | 515 if (it.second->IsSeekWaitingForData()) |
| 516 return true; | |
| 517 } | |
| 482 | 518 |
| 483 // NOTE: We are intentionally not checking the text tracks | 519 // NOTE: We are intentionally not checking the text tracks |
| 484 // because text tracks are discontinuous and may not have data | 520 // because text tracks are discontinuous and may not have data |
| 485 // for the seek position. This is ok and playback should not be | 521 // for the seek position. This is ok and playback should not be |
| 486 // stalled because we don't have cues. If cues, with timestamps after | 522 // stalled because we don't have cues. If cues, with timestamps after |
| 487 // the seek time, eventually arrive they will be delivered properly | 523 // the seek time, eventually arrive they will be delivered properly |
| 488 // in response to ChunkDemuxerStream::Read() calls. | 524 // in response to ChunkDemuxerStream::Read() calls. |
| 489 | 525 |
| 490 return false; | 526 return false; |
| 491 } | 527 } |
| 492 | 528 |
| 493 bool MediaSourceState::OnNewConfigs( | 529 bool MediaSourceState::OnNewConfigs( |
| 494 bool allow_audio, | 530 std::string expected_codecs, |
| 495 bool allow_video, | |
| 496 std::unique_ptr<MediaTracks> tracks, | 531 std::unique_ptr<MediaTracks> tracks, |
| 497 const StreamParser::TextTrackConfigMap& text_configs) { | 532 const StreamParser::TextTrackConfigMap& text_configs) { |
| 533 DCHECK(tracks.get()); | |
| 534 DVLOG(1) << __func__ << " expected_codecs=" << expected_codecs | |
| 535 << " tracks=" << tracks->tracks().size(); | |
| 498 DCHECK_GE(state_, PENDING_PARSER_CONFIG); | 536 DCHECK_GE(state_, PENDING_PARSER_CONFIG); |
| 499 DCHECK(tracks.get()); | |
| 500 | 537 |
| 501 MediaTrack* audio_track = nullptr; | 538 // Check that there are no clashing bytestream track ids. |
| 502 MediaTrack* video_track = nullptr; | 539 if (!CheckBytestreamTrackIds(*tracks, text_configs)) { |
| 503 AudioDecoderConfig audio_config; | 540 MEDIA_LOG(ERROR, media_log_) << "Duplicate bytestream track ids detected"; |
| 504 VideoDecoderConfig video_config; | 541 for (const auto& track : tracks->tracks()) { |
| 542 const StreamParser::TrackId& track_id = track->bytestream_track_id(); | |
| 543 MEDIA_LOG(DEBUG, media_log_) << TrackTypeToStr(track->type()) << " track " | |
| 544 << " bytestream track id=" << track_id; | |
| 545 } | |
| 546 return false; | |
| 547 } | |
| 548 | |
| 549 // MSE spec allows new configs to be emitted only during Append, but not | |
| 550 // during Flush or parser reset operations. | |
| 551 CHECK(append_in_progress_); | |
| 552 | |
| 553 bool success = true; | |
| 554 | |
| 555 std::vector<AudioCodec> expected_acodecs = expected_audio_codecs_; | |
| 556 std::vector<VideoCodec> expected_vcodecs = expected_video_codecs_; | |
| 557 | |
| 505 for (const auto& track : tracks->tracks()) { | 558 for (const auto& track : tracks->tracks()) { |
| 506 const auto& track_id = track->bytestream_track_id(); | 559 const auto& track_id = track->bytestream_track_id(); |
| 507 | 560 |
| 508 if (track->type() == MediaTrack::Audio) { | 561 if (track->type() == MediaTrack::Audio) { |
| 509 if (audio_track) { | 562 AudioDecoderConfig audio_config = tracks->getAudioConfig(track_id); |
| 510 MEDIA_LOG(ERROR, media_log_) | 563 DVLOG(1) << "Audio track_id=" << track_id |
| 511 << "Error: more than one audio track is currently not supported."; | 564 << " config: " << audio_config.AsHumanReadableString(); |
| 565 DCHECK(audio_config.IsValidConfig()); | |
| 566 | |
| 567 const auto& it = std::find(expected_acodecs.begin(), | |
| 568 expected_acodecs.end(), audio_config.codec()); | |
| 569 if (it == expected_acodecs.end()) { | |
| 570 MEDIA_LOG(ERROR, media_log_) << "Audio stream codec " | |
| 571 << GetCodecName(audio_config.codec()) | |
| 572 << " doesn't match SourceBuffer codecs."; | |
| 512 return false; | 573 return false; |
| 513 } | 574 } |
| 514 audio_track = track.get(); | 575 expected_acodecs.erase(it); |
| 515 audio_config = tracks->getAudioConfig(track_id); | 576 |
| 516 DCHECK(audio_config.IsValidConfig()); | 577 ChunkDemuxerStream* stream = nullptr; |
| 578 if (!first_init_segment_received_) { | |
| 579 DCHECK(audio_streams_.find(track_id) == audio_streams_.end()); | |
| 580 stream = create_demuxer_stream_cb_.Run(DemuxerStream::AUDIO); | |
| 581 if (!stream || !frame_processor_->AddTrack(track_id, stream)) { | |
| 582 MEDIA_LOG(ERROR, media_log_) << "Failed to create audio stream."; | |
| 583 return false; | |
| 584 } | |
| 585 audio_streams_[track_id] = stream; | |
| 586 media_log_->SetBooleanProperty("found_audio_stream", true); | |
| 587 media_log_->SetStringProperty("audio_codec_name", | |
| 588 GetCodecName(audio_config.codec())); | |
| 589 } else { | |
| 590 if (audio_streams_.size() > 1) { | |
| 591 stream = audio_streams_[track_id]; | |
| 592 } else { | |
| 593 // If there is only one audio track then bytestream id might change in | |
| 594 // a new init segment. So update our state and notify frame processor. |
| 595 const auto& it = audio_streams_.begin(); | |
| 596 if (it != audio_streams_.end()) { | |
| 597 stream = it->second; | |
| 598 if (it->first != track_id) { | |
| 599 frame_processor_->UpdateTrack(it->first, track_id); | |
| 600 audio_streams_[track_id] = stream; | |
| 601 audio_streams_.erase(it->first); | |
| 602 } | |
| 603 } | |
| 604 } | |
| 605 if (!stream) { | |
| 606 MEDIA_LOG(ERROR, media_log_) << "Got unexpected audio track" | |
| 607 << " track_id=" << track_id; | |
| 608 return false; | |
| 609 } | |
| 610 } | |
| 611 | |
| 612 track->set_id(stream->media_track_id()); | |
| 613 frame_processor_->OnPossibleAudioConfigUpdate(audio_config); | |
| 614 success &= stream->UpdateAudioConfig(audio_config, media_log_); | |
| 517 } else if (track->type() == MediaTrack::Video) { | 615 } else if (track->type() == MediaTrack::Video) { |
| 518 if (video_track) { | 616 VideoDecoderConfig video_config = tracks->getVideoConfig(track_id); |
| 519 MEDIA_LOG(ERROR, media_log_) | 617 DVLOG(1) << "Video track_id=" << track_id |
| 520 << "Error: more than one video track is currently not supported."; | 618 << " config: " << video_config.AsHumanReadableString(); |
| 619 DCHECK(video_config.IsValidConfig()); | |
| 620 | |
| 621 const auto& it = std::find(expected_vcodecs.begin(), | |
| 622 expected_vcodecs.end(), video_config.codec()); | |
| 623 if (it == expected_vcodecs.end()) { | |
| 624 MEDIA_LOG(ERROR, media_log_) << "Video stream codec " | |
| 625 << GetCodecName(video_config.codec()) | |
| 626 << " doesn't match SourceBuffer codecs."; | |
| 521 return false; | 627 return false; |
| 522 } | 628 } |
| 523 video_track = track.get(); | 629 expected_vcodecs.erase(it); |
| 524 video_config = tracks->getVideoConfig(track_id); | 630 |
| 525 DCHECK(video_config.IsValidConfig()); | 631 ChunkDemuxerStream* stream = nullptr; |
| 632 if (!first_init_segment_received_) { | |
| 633 DCHECK(video_streams_.find(track_id) == video_streams_.end()); | |
| 634 stream = create_demuxer_stream_cb_.Run(DemuxerStream::VIDEO); | |
| 635 if (!stream || !frame_processor_->AddTrack(track_id, stream)) { | |
| 636 MEDIA_LOG(ERROR, media_log_) << "Failed to create video stream."; | |
| 637 return false; | |
| 638 } | |
| 639 video_streams_[track_id] = stream; | |
| 640 media_log_->SetBooleanProperty("found_video_stream", true); | |
| 641 media_log_->SetStringProperty("video_codec_name", | |
| 642 GetCodecName(video_config.codec())); | |
| 643 } else { | |
| 644 if (video_streams_.size() > 1) { | |
| 645 stream = video_streams_[track_id]; | |
| 646 } else { | |
| 647 // If there is only one video track then bytestream id might change in | |
| 648 // a new init segment. So update our state and notify frame processor. |
| 649 const auto& it = video_streams_.begin(); | |
| 650 if (it != video_streams_.end()) { | |
| 651 stream = it->second; | |
| 652 if (it->first != track_id) { | |
| 653 frame_processor_->UpdateTrack(it->first, track_id); | |
| 654 video_streams_[track_id] = stream; | |
| 655 video_streams_.erase(it->first); | |
| 656 } | |
| 657 } | |
| 658 } | |
| 659 if (!stream) { | |
| 660 MEDIA_LOG(ERROR, media_log_) << "Got unexpected video track" | |
| 661 << " track_id=" << track_id; | |
| 662 return false; | |
| 663 } | |
| 664 } | |
| 665 | |
| 666 track->set_id(stream->media_track_id()); | |
| 667 success &= stream->UpdateVideoConfig(video_config, media_log_); | |
| 526 } else { | 668 } else { |
| 527 MEDIA_LOG(ERROR, media_log_) << "Error: unsupported media track type " | 669 MEDIA_LOG(ERROR, media_log_) << "Error: unsupported media track type " |
| 528 << track->type(); | 670 << track->type(); |
| 529 return false; | 671 return false; |
| 530 } | 672 } |
| 531 } | 673 } |
| 532 | 674 |
| 533 DVLOG(1) << "OnNewConfigs(" << allow_audio << ", " << allow_video << ", " | 675 if (!expected_acodecs.empty() || !expected_vcodecs.empty()) { |
| 534 << audio_config.IsValidConfig() << ", " | 676 for (const auto& acodec : expected_acodecs) { |
| 535 << video_config.IsValidConfig() << ")"; | 677 MEDIA_LOG(ERROR, media_log_) << "Initialization segment misses expected " |
| 536 // MSE spec allows new configs to be emitted only during Append, but not | 678 << GetCodecName(acodec) << " track."; |
| 537 // during Flush or parser reset operations. | 679 } |
| 538 CHECK(append_in_progress_); | 680 for (const auto& vcodec : expected_vcodecs) { |
| 539 | 681 MEDIA_LOG(ERROR, media_log_) << "Initialization segment misses expected " |
| 540 if (!audio_config.IsValidConfig() && !video_config.IsValidConfig()) { | 682 << GetCodecName(vcodec) << " track."; |
| 541 DVLOG(1) << "OnNewConfigs() : Audio & video config are not valid!"; | 683 } |
| 542 return false; | 684 return false; |
| 543 } | 685 } |
| 544 | 686 |
| 545 // Signal an error if we get configuration info for stream types that weren't | |
| 546 // specified in AddId() or more configs after a stream is initialized. | |
| 547 if (allow_audio != audio_config.IsValidConfig()) { | |
| 548 MEDIA_LOG(ERROR, media_log_) | |
| 549 << "Initialization segment" | |
| 550 << (audio_config.IsValidConfig() ? " has" : " does not have") | |
| 551 << " an audio track, but the mimetype" | |
| 552 << (allow_audio ? " specifies" : " does not specify") | |
| 553 << " an audio codec."; | |
| 554 return false; | |
| 555 } | |
| 556 | |
| 557 if (allow_video != video_config.IsValidConfig()) { | |
| 558 MEDIA_LOG(ERROR, media_log_) | |
| 559 << "Initialization segment" | |
| 560 << (video_config.IsValidConfig() ? " has" : " does not have") | |
| 561 << " a video track, but the mimetype" | |
| 562 << (allow_video ? " specifies" : " does not specify") | |
| 563 << " a video codec."; | |
| 564 return false; | |
| 565 } | |
| 566 | |
| 567 bool success = true; | |
| 568 if (audio_config.IsValidConfig()) { | |
| 569 if (!audio_) { | |
| 570 media_log_->SetBooleanProperty("found_audio_stream", true); | |
| 571 } | |
| 572 if (!audio_ || | |
| 573 audio_->audio_decoder_config().codec() != audio_config.codec()) { | |
| 574 media_log_->SetStringProperty("audio_codec_name", | |
| 575 GetCodecName(audio_config.codec())); | |
| 576 } | |
| 577 | |
| 578 bool audio_stream_just_created = false; | |
| 579 if (!audio_) { | |
| 580 audio_ = create_demuxer_stream_cb_.Run(DemuxerStream::AUDIO); | |
| 581 | |
| 582 if (!audio_) { | |
| 583 DVLOG(1) << "Failed to create an audio stream."; | |
| 584 return false; | |
| 585 } | |
| 586 audio_stream_just_created = true; | |
| 587 | |
| 588 if (!frame_processor_->AddTrack(FrameProcessor::kAudioTrackId, audio_)) { | |
| 589 DVLOG(1) << "Failed to add audio track to frame processor."; | |
| 590 return false; | |
| 591 } | |
| 592 } | |
| 593 | |
| 594 frame_processor_->OnPossibleAudioConfigUpdate(audio_config); | |
| 595 success &= audio_->UpdateAudioConfig(audio_config, media_log_); | |
| 596 | |
| 597 if (audio_stream_just_created) { | |
| 598 std::string audio_buf_limit_switch = | |
| 599 base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( | |
| 600 switches::kMSEAudioBufferSizeLimit); | |
| 601 unsigned audio_buf_size_limit = 0; | |
| 602 if (base::StringToUint(audio_buf_limit_switch, &audio_buf_size_limit) && | |
| 603 audio_buf_size_limit > 0) { | |
| 604 MEDIA_LOG(INFO, media_log_) << "Custom audio SourceBuffer size limit=" | |
| 605 << audio_buf_size_limit; | |
| 606 audio_->SetStreamMemoryLimit(audio_buf_size_limit); | |
| 607 } | |
| 608 } | |
| 609 } | |
| 610 | |
| 611 if (video_config.IsValidConfig()) { | |
| 612 if (!video_) { | |
| 613 media_log_->SetBooleanProperty("found_video_stream", true); | |
| 614 } | |
| 615 if (!video_ || | |
| 616 video_->video_decoder_config().codec() != video_config.codec()) { | |
| 617 media_log_->SetStringProperty("video_codec_name", | |
| 618 GetCodecName(video_config.codec())); | |
| 619 } | |
| 620 | |
| 621 bool video_stream_just_created = false; | |
| 622 if (!video_) { | |
| 623 video_ = create_demuxer_stream_cb_.Run(DemuxerStream::VIDEO); | |
| 624 | |
| 625 if (!video_) { | |
| 626 DVLOG(1) << "Failed to create a video stream."; | |
| 627 return false; | |
| 628 } | |
| 629 video_stream_just_created = true; | |
| 630 | |
| 631 if (!frame_processor_->AddTrack(FrameProcessor::kVideoTrackId, video_)) { | |
| 632 DVLOG(1) << "Failed to add video track to frame processor."; | |
| 633 return false; | |
| 634 } | |
| 635 } | |
| 636 | |
| 637 success &= video_->UpdateVideoConfig(video_config, media_log_); | |
| 638 | |
| 639 if (video_stream_just_created) { | |
| 640 std::string video_buf_limit_switch = | |
| 641 base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( | |
| 642 switches::kMSEVideoBufferSizeLimit); | |
| 643 unsigned video_buf_size_limit = 0; | |
| 644 if (base::StringToUint(video_buf_limit_switch, &video_buf_size_limit) && | |
| 645 video_buf_size_limit > 0) { | |
| 646 MEDIA_LOG(INFO, media_log_) << "Custom video SourceBuffer size limit=" | |
| 647 << video_buf_size_limit; | |
| 648 video_->SetStreamMemoryLimit(video_buf_size_limit); | |
| 649 } | |
| 650 } | |
| 651 } | |
| 652 | |
| 653 typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr; | 687 typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr; |
| 654 if (text_stream_map_.empty()) { | 688 if (text_stream_map_.empty()) { |
| 655 for (TextConfigItr itr = text_configs.begin(); itr != text_configs.end(); | 689 for (TextConfigItr itr = text_configs.begin(); itr != text_configs.end(); |
| 656 ++itr) { | 690 ++itr) { |
| 657 ChunkDemuxerStream* const text_stream = | 691 ChunkDemuxerStream* const text_stream = |
| 658 create_demuxer_stream_cb_.Run(DemuxerStream::TEXT); | 692 create_demuxer_stream_cb_.Run(DemuxerStream::TEXT); |
| 659 if (!frame_processor_->AddTrack(itr->first, text_stream)) { | 693 if (!frame_processor_->AddTrack(itr->first, text_stream)) { |
| 660 success &= false; | 694 success &= false; |
| 661 MEDIA_LOG(ERROR, media_log_) << "Failed to add text track ID " | 695 MEDIA_LOG(ERROR, media_log_) << "Failed to add text track ID " |
| 662 << itr->first << " to frame processor."; | 696 << itr->first << " to frame processor."; |
| (...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 718 success &= false; | 752 success &= false; |
| 719 MEDIA_LOG(ERROR, media_log_) << "New text track config for track ID " | 753 MEDIA_LOG(ERROR, media_log_) << "New text track config for track ID " |
| 720 << config_itr->first | 754 << config_itr->first |
| 721 << " does not match old one."; | 755 << " does not match old one."; |
| 722 break; | 756 break; |
| 723 } | 757 } |
| 724 } | 758 } |
| 725 } | 759 } |
| 726 } | 760 } |
| 727 | 761 |
| 762 if (audio_streams_.empty() && video_streams_.empty()) { | |
| 763 DVLOG(1) << __func__ << ": couldn't find a valid audio or video stream"; | |
| 764 return false; | |
| 765 } | |
| 766 | |
| 728 frame_processor_->SetAllTrackBuffersNeedRandomAccessPoint(); | 767 frame_processor_->SetAllTrackBuffersNeedRandomAccessPoint(); |
| 729 | 768 |
| 730 if (audio_track) { | 769 if (!first_init_segment_received_) { |
| 731 DCHECK(audio_); | 770 first_init_segment_received_ = true; |
| 732 audio_track->set_id(audio_->media_track_id()); | 771 SetStreamMemoryLimits(); |
| 733 } | |
| 734 if (video_track) { | |
| 735 DCHECK(video_); | |
| 736 video_track->set_id(video_->media_track_id()); | |
| 737 } | 772 } |
| 738 | 773 |
| 739 DVLOG(1) << "OnNewConfigs() : " << (success ? "success" : "failed"); | 774 DVLOG(1) << "OnNewConfigs() : " << (success ? "success" : "failed"); |
| 740 if (success) { | 775 if (success) { |
| 741 if (state_ == PENDING_PARSER_CONFIG) | 776 if (state_ == PENDING_PARSER_CONFIG) |
| 742 state_ = PENDING_PARSER_INIT; | 777 state_ = PENDING_PARSER_INIT; |
| 743 DCHECK(!init_segment_received_cb_.is_null()); | 778 DCHECK(!init_segment_received_cb_.is_null()); |
| 744 init_segment_received_cb_.Run(std::move(tracks)); | 779 init_segment_received_cb_.Run(std::move(tracks)); |
| 745 } | 780 } |
| 746 | 781 |
| 747 return success; | 782 return success; |
| 748 } | 783 } |
| 749 | 784 |
| 785 void MediaSourceState::SetStreamMemoryLimits() { | |
|
wolenetz
2016/09/14 23:31:21
Note: This method is *not* used solely by tests (v
servolk
2016/09/15 00:18:32
Acknowledged.
| |
| 786 auto cmd_line = base::CommandLine::ForCurrentProcess(); | |
| 787 | |
| 788 std::string audio_buf_limit_switch = | |
| 789 cmd_line->GetSwitchValueASCII(switches::kMSEAudioBufferSizeLimit); | |
| 790 unsigned audio_buf_size_limit = 0; | |
| 791 if (base::StringToUint(audio_buf_limit_switch, &audio_buf_size_limit) && | |
| 792 audio_buf_size_limit > 0) { | |
| 793 MEDIA_LOG(INFO, media_log_) | |
| 794 << "Custom audio per-track SourceBuffer size limit=" | |
| 795 << audio_buf_size_limit; | |
| 796 for (const auto& it : audio_streams_) { | |
| 797 it.second->SetStreamMemoryLimit(audio_buf_size_limit); | |
| 798 } | |
| 799 } | |
| 800 | |
| 801 std::string video_buf_limit_switch = | |
| 802 cmd_line->GetSwitchValueASCII(switches::kMSEVideoBufferSizeLimit); | |
| 803 unsigned video_buf_size_limit = 0; | |
| 804 if (base::StringToUint(video_buf_limit_switch, &video_buf_size_limit) && | |
| 805 video_buf_size_limit > 0) { | |
| 806 MEDIA_LOG(INFO, media_log_) | |
| 807 << "Custom video per-track SourceBuffer size limit=" | |
| 808 << video_buf_size_limit; | |
| 809 for (const auto& it : video_streams_) { | |
| 810 it.second->SetStreamMemoryLimit(video_buf_size_limit); | |
| 811 } | |
| 812 } | |
| 813 } | |
| 814 | |
| 750 void MediaSourceState::OnNewMediaSegment() { | 815 void MediaSourceState::OnNewMediaSegment() { |
| 751 DVLOG(2) << "OnNewMediaSegment()"; | 816 DVLOG(2) << "OnNewMediaSegment()"; |
| 752 DCHECK_EQ(state_, PARSER_INITIALIZED); | 817 DCHECK_EQ(state_, PARSER_INITIALIZED); |
| 753 parsing_media_segment_ = true; | 818 parsing_media_segment_ = true; |
| 754 media_segment_contained_audio_frame_ = false; | 819 media_segment_has_data_for_track_.clear(); |
| 755 media_segment_contained_video_frame_ = false; | |
| 756 } | 820 } |
| 757 | 821 |
| 758 void MediaSourceState::OnEndOfMediaSegment() { | 822 void MediaSourceState::OnEndOfMediaSegment() { |
| 759 DVLOG(2) << "OnEndOfMediaSegment()"; | 823 DVLOG(2) << "OnEndOfMediaSegment()"; |
| 760 DCHECK_EQ(state_, PARSER_INITIALIZED); | 824 DCHECK_EQ(state_, PARSER_INITIALIZED); |
| 761 parsing_media_segment_ = false; | 825 parsing_media_segment_ = false; |
| 762 | 826 |
| 763 const bool missing_audio = audio_ && !media_segment_contained_audio_frame_; | 827 for (const auto& it : audio_streams_) { |
| 764 const bool missing_video = video_ && !media_segment_contained_video_frame_; | 828 if (!media_segment_has_data_for_track_[it.first]) { |
| 765 if (!missing_audio && !missing_video) | 829 LIMITED_MEDIA_LOG(DEBUG, media_log_, num_missing_track_logs_, |
| 766 return; | 830 kMaxMissingTrackInSegmentLogs) |
| 767 | 831 << "Media segment did not contain any coded frames for track " |
| 768 LIMITED_MEDIA_LOG(DEBUG, media_log_, num_missing_track_logs_, | 832 << it.first << ", mismatching initialization segment. Therefore, MSE" |
| 769 kMaxMissingTrackInSegmentLogs) | 833 " coded frame processing may not interoperably detect" |
| 770 << "Media segment did not contain any " | 834 " discontinuities in appended media."; |
| 771 << (missing_audio && missing_video ? "audio or video" | 835 } |
| 772 : missing_audio ? "audio" : "video") | 836 } |
| 773 << " coded frames, mismatching initialization segment. Therefore, MSE " | 837 for (const auto& it : video_streams_) { |
| 774 "coded frame processing may not interoperably detect discontinuities " | 838 if (!media_segment_has_data_for_track_[it.first]) { |
| 775 "in appended media."; | 839 LIMITED_MEDIA_LOG(DEBUG, media_log_, num_missing_track_logs_, |
| 840 kMaxMissingTrackInSegmentLogs) | |
| 841 << "Media segment did not contain any coded frames for track " | |
| 842 << it.first << ", mismatching initialization segment. Therefore, MSE" | |
| 843 " coded frame processing may not interoperably detect" | |
| 844 " discontinuities in appended media."; | |
| 845 } | |
| 846 } | |
| 776 } | 847 } |
| 777 | 848 |
| 778 bool MediaSourceState::OnNewBuffers( | 849 bool MediaSourceState::OnNewBuffers( |
| 779 const StreamParser::BufferQueueMap& buffer_queue_map) { | 850 const StreamParser::BufferQueueMap& buffer_queue_map) { |
| 780 DVLOG(2) << "OnNewBuffers()"; | 851 DVLOG(2) << __func__ << " buffer_queues=" << buffer_queue_map.size(); |
| 781 DCHECK_EQ(state_, PARSER_INITIALIZED); | 852 DCHECK_EQ(state_, PARSER_INITIALIZED); |
| 782 DCHECK(timestamp_offset_during_append_); | 853 DCHECK(timestamp_offset_during_append_); |
| 783 DCHECK(parsing_media_segment_); | 854 DCHECK(parsing_media_segment_); |
| 784 | 855 |
| 785 for (const auto& it : buffer_queue_map) { | 856 for (const auto& it : buffer_queue_map) { |
| 786 const StreamParser::BufferQueue& bufq = it.second; | 857 const StreamParser::BufferQueue& bufq = it.second; |
| 787 DCHECK(!bufq.empty()); | 858 DCHECK(!bufq.empty()); |
| 788 if (bufq[0]->type() == DemuxerStream::AUDIO) { | 859 media_segment_has_data_for_track_[it.first] = true; |
| 789 media_segment_contained_audio_frame_ = true; | |
| 790 } else if (bufq[0]->type() == DemuxerStream::VIDEO) { | |
| 791 media_segment_contained_video_frame_ = true; | |
| 792 } | |
| 793 } | 860 } |
| 794 | 861 |
| 795 const TimeDelta timestamp_offset_before_processing = | 862 const TimeDelta timestamp_offset_before_processing = |
| 796 *timestamp_offset_during_append_; | 863 *timestamp_offset_during_append_; |
| 797 | 864 |
| 798 // Calculate the new timestamp offset for audio/video tracks if the stream | 865 // Calculate the new timestamp offset for audio/video tracks if the stream |
| 799 // parser has requested automatic updates. | 866 // parser has requested automatic updates. |
| 800 TimeDelta new_timestamp_offset = timestamp_offset_before_processing; | 867 TimeDelta new_timestamp_offset = timestamp_offset_before_processing; |
| 801 if (auto_update_timestamp_offset_) { | 868 if (auto_update_timestamp_offset_) { |
| 802 TimeDelta min_end_timestamp = kNoTimestamp; | 869 TimeDelta min_end_timestamp = kNoTimestamp; |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 820 } | 887 } |
| 821 | 888 |
| 822 // Only update the timestamp offset if the frame processor hasn't already. | 889 // Only update the timestamp offset if the frame processor hasn't already. |
| 823 if (auto_update_timestamp_offset_ && | 890 if (auto_update_timestamp_offset_ && |
| 824 timestamp_offset_before_processing == *timestamp_offset_during_append_) { | 891 timestamp_offset_before_processing == *timestamp_offset_during_append_) { |
| 825 *timestamp_offset_during_append_ = new_timestamp_offset; | 892 *timestamp_offset_during_append_ = new_timestamp_offset; |
| 826 } | 893 } |
| 827 | 894 |
| 828 return true; | 895 return true; |
| 829 } | 896 } |
| 830 | |
| 831 void MediaSourceState::OnSourceInitDone( | 897 void MediaSourceState::OnSourceInitDone( |
| 832 const StreamParser::InitParameters& params) { | 898 const StreamParser::InitParameters& params) { |
| 833 DCHECK_EQ(state_, PENDING_PARSER_INIT); | 899 DCHECK_EQ(state_, PENDING_PARSER_INIT); |
| 834 state_ = PARSER_INITIALIZED; | 900 state_ = PARSER_INITIALIZED; |
| 835 auto_update_timestamp_offset_ = params.auto_update_timestamp_offset; | 901 auto_update_timestamp_offset_ = params.auto_update_timestamp_offset; |
| 836 base::ResetAndReturn(&init_cb_).Run(params); | 902 base::ResetAndReturn(&init_cb_).Run(params); |
| 837 } | 903 } |
| 838 | 904 |
| 839 } // namespace media | 905 } // namespace media |
| OLD | NEW |