OLD | NEW |
---|---|
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/media_source_state.h" | 5 #include "media/filters/media_source_state.h" |
6 | 6 |
7 #include <set> | |
8 | |
7 #include "base/callback_helpers.h" | 9 #include "base/callback_helpers.h" |
8 #include "base/command_line.h" | 10 #include "base/command_line.h" |
9 #include "base/stl_util.h" | 11 #include "base/stl_util.h" |
10 #include "base/strings/string_number_conversions.h" | 12 #include "base/strings/string_number_conversions.h" |
11 #include "media/base/media_switches.h" | 13 #include "media/base/media_switches.h" |
12 #include "media/base/media_track.h" | 14 #include "media/base/media_track.h" |
13 #include "media/base/media_tracks.h" | 15 #include "media/base/media_tracks.h" |
16 #include "media/base/mime_util.h" | |
14 #include "media/filters/chunk_demuxer.h" | 17 #include "media/filters/chunk_demuxer.h" |
15 #include "media/filters/frame_processor.h" | 18 #include "media/filters/frame_processor.h" |
16 #include "media/filters/source_buffer_stream.h" | 19 #include "media/filters/source_buffer_stream.h" |
17 | 20 |
18 namespace media { | 21 namespace media { |
19 | 22 |
20 enum { | 23 enum { |
21 // Limits the number of MEDIA_LOG() calls warning the user that a muxed stream | 24 // Limits the number of MEDIA_LOG() calls warning the user that a muxed stream |
22 // media segment is missing a block from at least one of the audio or video | 25 // media segment is missing a block from at least one of the audio or video |
23 // tracks. | 26 // tracks. |
24 kMaxMissingTrackInSegmentLogs = 10, | 27 kMaxMissingTrackInSegmentLogs = 10, |
25 }; | 28 }; |
26 | 29 |
27 static TimeDelta EndTimestamp(const StreamParser::BufferQueue& queue) { | 30 namespace { |
31 | |
32 TimeDelta EndTimestamp(const StreamParser::BufferQueue& queue) { | |
28 return queue.back()->timestamp() + queue.back()->duration(); | 33 return queue.back()->timestamp() + queue.back()->duration(); |
29 } | 34 } |
30 | 35 |
36 // Check the input |text_configs| and |bytestream_ids| and return false if | |
37 // duplicate track ids are detected. | |
38 bool CheckBytestreamTrackIds( | |
39 const MediaTracks& tracks, | |
40 const StreamParser::TextTrackConfigMap& text_configs) { | |
41 std::set<StreamParser::TrackId> bytestream_ids; | |
42 for (const auto& track : tracks.tracks()) { | |
43 const StreamParser::TrackId& track_id = track->bytestream_track_id(); | |
44 if (bytestream_ids.find(track_id) != bytestream_ids.end()) { | |
45 return false; | |
46 } | |
47 bytestream_ids.insert(track_id); | |
48 } | |
49 for (const auto& text_track : text_configs) { | |
50 const StreamParser::TrackId& track_id = text_track.first; | |
51 if (bytestream_ids.find(track_id) != bytestream_ids.end()) { | |
52 return false; | |
53 } | |
54 bytestream_ids.insert(track_id); | |
55 } | |
56 return true; | |
57 } | |
58 | |
59 } // namespace | |
60 | |
31 // List of time ranges for each SourceBuffer. | 61 // List of time ranges for each SourceBuffer. |
32 // static | 62 // static |
33 Ranges<TimeDelta> MediaSourceState::ComputeRangesIntersection( | 63 Ranges<TimeDelta> MediaSourceState::ComputeRangesIntersection( |
34 const RangesList& active_ranges, | 64 const RangesList& active_ranges, |
35 bool ended) { | 65 bool ended) { |
36 // TODO(servolk): Perhaps this can be removed in favor of blink implementation | 66 // TODO(servolk): Perhaps this can be removed in favor of blink implementation |
37 // (MediaSource::buffered)? Currently this is only used on Android and for | 67 // (MediaSource::buffered)? Currently this is only used on Android and for |
38 // updating DemuxerHost's buffered ranges during AppendData() as well as | 68 // updating DemuxerHost's buffered ranges during AppendData() as well as |
39 // SourceBuffer.buffered property implemetation. | 69 // SourceBuffer.buffered property implemetation. |
40 // Implementation of HTMLMediaElement.buffered algorithm in MSE spec. | 70 // Implementation of HTMLMediaElement.buffered algorithm in MSE spec. |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
87 } | 117 } |
88 | 118 |
89 MediaSourceState::MediaSourceState( | 119 MediaSourceState::MediaSourceState( |
90 std::unique_ptr<StreamParser> stream_parser, | 120 std::unique_ptr<StreamParser> stream_parser, |
91 std::unique_ptr<FrameProcessor> frame_processor, | 121 std::unique_ptr<FrameProcessor> frame_processor, |
92 const CreateDemuxerStreamCB& create_demuxer_stream_cb, | 122 const CreateDemuxerStreamCB& create_demuxer_stream_cb, |
93 const scoped_refptr<MediaLog>& media_log) | 123 const scoped_refptr<MediaLog>& media_log) |
94 : create_demuxer_stream_cb_(create_demuxer_stream_cb), | 124 : create_demuxer_stream_cb_(create_demuxer_stream_cb), |
95 timestamp_offset_during_append_(NULL), | 125 timestamp_offset_during_append_(NULL), |
96 parsing_media_segment_(false), | 126 parsing_media_segment_(false), |
97 media_segment_contained_audio_frame_(false), | |
98 media_segment_contained_video_frame_(false), | |
99 stream_parser_(stream_parser.release()), | 127 stream_parser_(stream_parser.release()), |
100 audio_(NULL), | |
101 video_(NULL), | |
102 frame_processor_(frame_processor.release()), | 128 frame_processor_(frame_processor.release()), |
103 media_log_(media_log), | 129 media_log_(media_log), |
104 state_(UNINITIALIZED), | 130 state_(UNINITIALIZED), |
105 auto_update_timestamp_offset_(false) { | 131 auto_update_timestamp_offset_(false) { |
106 DCHECK(!create_demuxer_stream_cb_.is_null()); | 132 DCHECK(!create_demuxer_stream_cb_.is_null()); |
107 DCHECK(frame_processor_); | 133 DCHECK(frame_processor_); |
108 } | 134 } |
109 | 135 |
110 MediaSourceState::~MediaSourceState() { | 136 MediaSourceState::~MediaSourceState() { |
111 Shutdown(); | 137 Shutdown(); |
112 | 138 |
113 base::STLDeleteValues(&text_stream_map_); | 139 base::STLDeleteValues(&text_stream_map_); |
114 } | 140 } |
115 | 141 |
116 void MediaSourceState::Init( | 142 void MediaSourceState::Init( |
117 const StreamParser::InitCB& init_cb, | 143 const StreamParser::InitCB& init_cb, |
118 bool allow_audio, | 144 const std::string& expected_codecs, |
119 bool allow_video, | |
120 const StreamParser::EncryptedMediaInitDataCB& encrypted_media_init_data_cb, | 145 const StreamParser::EncryptedMediaInitDataCB& encrypted_media_init_data_cb, |
121 const NewTextTrackCB& new_text_track_cb) { | 146 const NewTextTrackCB& new_text_track_cb) { |
122 DCHECK_EQ(state_, UNINITIALIZED); | 147 DCHECK_EQ(state_, UNINITIALIZED); |
123 new_text_track_cb_ = new_text_track_cb; | 148 new_text_track_cb_ = new_text_track_cb; |
124 init_cb_ = init_cb; | 149 init_cb_ = init_cb; |
125 | 150 |
151 std::vector<std::string> expected_codecs_parsed; | |
152 ParseCodecString(expected_codecs, &expected_codecs_parsed, false); | |
153 | |
154 std::vector<AudioCodec> expected_acodecs; | |
155 std::vector<VideoCodec> expected_vcodecs; | |
156 for (const auto& codec_id : expected_codecs_parsed) { | |
157 AudioCodec acodec = StringToAudioCodec(codec_id); | |
158 if (acodec != kUnknownAudioCodec) { | |
159 expected_audio_codecs_.push_back(acodec); | |
160 continue; | |
161 } | |
162 VideoCodec vcodec = StringToVideoCodec(codec_id); | |
163 if (vcodec != kUnknownVideoCodec) { | |
164 expected_video_codecs_.push_back(vcodec); | |
165 continue; | |
166 } | |
167 MEDIA_LOG(INFO, media_log_) << "Unrecognized media codec: " << codec_id; | |
168 } | |
169 | |
126 state_ = PENDING_PARSER_CONFIG; | 170 state_ = PENDING_PARSER_CONFIG; |
127 stream_parser_->Init( | 171 stream_parser_->Init( |
128 base::Bind(&MediaSourceState::OnSourceInitDone, base::Unretained(this)), | 172 base::Bind(&MediaSourceState::OnSourceInitDone, base::Unretained(this)), |
129 base::Bind(&MediaSourceState::OnNewConfigs, base::Unretained(this), | 173 base::Bind(&MediaSourceState::OnNewConfigs, base::Unretained(this), |
130 allow_audio, allow_video), | 174 expected_codecs), |
131 base::Bind(&MediaSourceState::OnNewBuffers, base::Unretained(this)), | 175 base::Bind(&MediaSourceState::OnNewBuffers, base::Unretained(this)), |
132 new_text_track_cb_.is_null(), encrypted_media_init_data_cb, | 176 new_text_track_cb_.is_null(), encrypted_media_init_data_cb, |
133 base::Bind(&MediaSourceState::OnNewMediaSegment, base::Unretained(this)), | 177 base::Bind(&MediaSourceState::OnNewMediaSegment, base::Unretained(this)), |
134 base::Bind(&MediaSourceState::OnEndOfMediaSegment, | 178 base::Bind(&MediaSourceState::OnEndOfMediaSegment, |
135 base::Unretained(this)), | 179 base::Unretained(this)), |
136 media_log_); | 180 media_log_); |
137 } | 181 } |
138 | 182 |
139 void MediaSourceState::SetSequenceMode(bool sequence_mode) { | 183 void MediaSourceState::SetSequenceMode(bool sequence_mode) { |
140 DCHECK(!parsing_media_segment_); | 184 DCHECK(!parsing_media_segment_); |
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
189 DCHECK(!timestamp_offset_during_append_); | 233 DCHECK(!timestamp_offset_during_append_); |
190 timestamp_offset_during_append_ = timestamp_offset; | 234 timestamp_offset_during_append_ = timestamp_offset; |
191 append_window_start_during_append_ = append_window_start; | 235 append_window_start_during_append_ = append_window_start; |
192 append_window_end_during_append_ = append_window_end; | 236 append_window_end_during_append_ = append_window_end; |
193 | 237 |
194 stream_parser_->Flush(); | 238 stream_parser_->Flush(); |
195 timestamp_offset_during_append_ = NULL; | 239 timestamp_offset_during_append_ = NULL; |
196 | 240 |
197 frame_processor_->Reset(); | 241 frame_processor_->Reset(); |
198 parsing_media_segment_ = false; | 242 parsing_media_segment_ = false; |
199 media_segment_contained_audio_frame_ = false; | 243 media_segment_has_data_for_track_.clear(); |
200 media_segment_contained_video_frame_ = false; | |
201 } | 244 } |
202 | 245 |
203 void MediaSourceState::Remove(TimeDelta start, | 246 void MediaSourceState::Remove(TimeDelta start, |
204 TimeDelta end, | 247 TimeDelta end, |
205 TimeDelta duration) { | 248 TimeDelta duration) { |
206 if (audio_) | 249 for (const auto& it : audio_streams_) { |
207 audio_->Remove(start, end, duration); | 250 it.second->Remove(start, end, duration); |
251 } | |
208 | 252 |
209 if (video_) | 253 for (const auto& it : video_streams_) { |
210 video_->Remove(start, end, duration); | 254 it.second->Remove(start, end, duration); |
255 } | |
211 | 256 |
212 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 257 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
213 itr != text_stream_map_.end(); ++itr) { | 258 itr != text_stream_map_.end(); ++itr) { |
214 itr->second->Remove(start, end, duration); | 259 itr->second->Remove(start, end, duration); |
215 } | 260 } |
216 } | 261 } |
217 | 262 |
218 size_t MediaSourceState::EstimateVideoDataSize( | |
219 size_t muxed_data_chunk_size) const { | |
220 DCHECK(audio_); | |
221 DCHECK(video_); | |
222 | |
223 size_t videoBufferedSize = video_->GetBufferedSize(); | |
224 size_t audioBufferedSize = audio_->GetBufferedSize(); | |
225 if (videoBufferedSize == 0 || audioBufferedSize == 0) { | |
226 // At this point either audio or video buffer is empty, which means buffer | |
227 // levels are probably low anyway and we should have enough space in the | |
228 // buffers for appending new data, so just take a very rough guess. | |
229 return muxed_data_chunk_size * 7 / 8; | |
230 } | |
231 | |
232 // We need to estimate how much audio and video data is going to be in the | |
233 // newly appended data chunk to make space for the new data. And we need to do | |
234 // that without parsing the data (which will happen later, in the Append | |
235 // phase). So for now we can only rely on some heuristic here. Let's assume | |
236 // that the proportion of the audio/video in the new data chunk is the same as | |
237 // the current ratio of buffered audio/video. | |
238 // Longer term this should go away once we further change the MSE GC algorithm | |
239 // to work across all streams of a SourceBuffer (see crbug.com/520704). | |
240 double videoBufferedSizeF = static_cast<double>(videoBufferedSize); | |
241 double audioBufferedSizeF = static_cast<double>(audioBufferedSize); | |
242 | |
243 double totalBufferedSizeF = videoBufferedSizeF + audioBufferedSizeF; | |
244 CHECK_GT(totalBufferedSizeF, 0.0); | |
245 | |
246 double videoRatio = videoBufferedSizeF / totalBufferedSizeF; | |
247 CHECK_GE(videoRatio, 0.0); | |
248 CHECK_LE(videoRatio, 1.0); | |
249 double estimatedVideoSize = muxed_data_chunk_size * videoRatio; | |
250 return static_cast<size_t>(estimatedVideoSize); | |
251 } | |
252 | |
253 bool MediaSourceState::EvictCodedFrames(DecodeTimestamp media_time, | 263 bool MediaSourceState::EvictCodedFrames(DecodeTimestamp media_time, |
254 size_t newDataSize) { | 264 size_t newDataSize) { |
255 bool success = true; | 265 size_t total_buffered_size = 0; |
266 for (const auto& it : audio_streams_) | |
267 total_buffered_size += it.second->GetBufferedSize(); | |
268 for (const auto& it : video_streams_) | |
269 total_buffered_size += it.second->GetBufferedSize(); | |
270 for (const auto& it : text_stream_map_) | |
271 total_buffered_size += it.second->GetBufferedSize(); | |
256 | 272 |
257 DVLOG(3) << __func__ << " media_time=" << media_time.InSecondsF() | 273 DVLOG(3) << __func__ << " media_time=" << media_time.InSecondsF() |
258 << " newDataSize=" << newDataSize | 274 << " newDataSize=" << newDataSize |
259 << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0) | 275 << " total_buffered_size=" << total_buffered_size; |
260 << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0); | |
261 | 276 |
262 size_t newAudioSize = 0; | 277 if (total_buffered_size == 0) |
263 size_t newVideoSize = 0; | 278 return true; |
264 if (audio_ && video_) { | 279 |
265 newVideoSize = EstimateVideoDataSize(newDataSize); | 280 bool success = true; |
266 newAudioSize = newDataSize - newVideoSize; | 281 for (const auto& it : audio_streams_) { |
267 } else if (video_) { | 282 size_t curr_size = it.second->GetBufferedSize(); |
268 newVideoSize = newDataSize; | 283 if (curr_size == 0) |
269 } else if (audio_) { | 284 continue; |
270 newAudioSize = newDataSize; | 285 size_t estimated_new_size = newDataSize * curr_size / total_buffered_size; |
286 success &= it.second->EvictCodedFrames(media_time, estimated_new_size); | |
287 } | |
288 for (const auto& it : video_streams_) { | |
289 size_t curr_size = it.second->GetBufferedSize(); | |
290 if (curr_size == 0) | |
291 continue; | |
292 size_t estimated_new_size = newDataSize * curr_size / total_buffered_size; | |
293 success &= it.second->EvictCodedFrames(media_time, estimated_new_size); | |
294 } | |
295 for (const auto& it : text_stream_map_) { | |
296 size_t curr_size = it.second->GetBufferedSize(); | |
297 if (curr_size == 0) | |
298 continue; | |
299 size_t estimated_new_size = newDataSize * curr_size / total_buffered_size; | |
300 success &= it.second->EvictCodedFrames(media_time, estimated_new_size); | |
271 } | 301 } |
272 | 302 |
273 DVLOG(3) << __func__ | 303 DVLOG(3) << __func__ << " success=" << success; |
274 << " estimated audio/video sizes: newVideoSize=" << newVideoSize | |
275 << " newAudioSize=" << newAudioSize; | |
276 | |
277 if (audio_) | |
278 success = audio_->EvictCodedFrames(media_time, newAudioSize) && success; | |
279 | |
280 if (video_) | |
281 success = video_->EvictCodedFrames(media_time, newVideoSize) && success; | |
282 | |
283 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | |
284 itr != text_stream_map_.end(); ++itr) { | |
285 success = itr->second->EvictCodedFrames(media_time, 0) && success; | |
286 } | |
287 | |
288 DVLOG(3) << __func__ << " result=" << success | |
289 << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0) | |
290 << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0); | |
291 | |
292 return success; | 304 return success; |
293 } | 305 } |
294 | 306 |
295 Ranges<TimeDelta> MediaSourceState::GetBufferedRanges(TimeDelta duration, | 307 Ranges<TimeDelta> MediaSourceState::GetBufferedRanges(TimeDelta duration, |
296 bool ended) const { | 308 bool ended) const { |
297 // TODO(acolwell): When we start allowing disabled tracks we'll need to update | |
298 // this code to only add ranges from active tracks. | |
299 RangesList ranges_list; | 309 RangesList ranges_list; |
300 if (audio_) | 310 for (const auto& it : audio_streams_) |
301 ranges_list.push_back(audio_->GetBufferedRanges(duration)); | 311 ranges_list.push_back(it.second->GetBufferedRanges(duration)); |
302 | 312 |
303 if (video_) | 313 for (const auto& it : video_streams_) |
304 ranges_list.push_back(video_->GetBufferedRanges(duration)); | 314 ranges_list.push_back(it.second->GetBufferedRanges(duration)); |
305 | 315 |
306 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); | 316 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); |
307 itr != text_stream_map_.end(); ++itr) { | 317 itr != text_stream_map_.end(); ++itr) { |
308 ranges_list.push_back(itr->second->GetBufferedRanges(duration)); | 318 ranges_list.push_back(itr->second->GetBufferedRanges(duration)); |
309 } | 319 } |
310 | 320 |
311 return ComputeRangesIntersection(ranges_list, ended); | 321 return ComputeRangesIntersection(ranges_list, ended); |
312 } | 322 } |
313 | 323 |
314 TimeDelta MediaSourceState::GetHighestPresentationTimestamp() const { | 324 TimeDelta MediaSourceState::GetHighestPresentationTimestamp() const { |
315 TimeDelta max_pts; | 325 TimeDelta max_pts; |
316 | 326 |
317 if (audio_) | 327 for (const auto& it : audio_streams_) { |
318 max_pts = std::max(max_pts, audio_->GetHighestPresentationTimestamp()); | 328 max_pts = std::max(max_pts, it.second->GetHighestPresentationTimestamp()); |
329 } | |
319 | 330 |
320 if (video_) | 331 for (const auto& it : video_streams_) { |
321 max_pts = std::max(max_pts, video_->GetHighestPresentationTimestamp()); | 332 max_pts = std::max(max_pts, it.second->GetHighestPresentationTimestamp()); |
333 } | |
322 | 334 |
323 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); | 335 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); |
324 itr != text_stream_map_.end(); ++itr) { | 336 itr != text_stream_map_.end(); ++itr) { |
325 max_pts = std::max(max_pts, itr->second->GetHighestPresentationTimestamp()); | 337 max_pts = std::max(max_pts, itr->second->GetHighestPresentationTimestamp()); |
326 } | 338 } |
327 | 339 |
328 return max_pts; | 340 return max_pts; |
329 } | 341 } |
330 | 342 |
331 TimeDelta MediaSourceState::GetMaxBufferedDuration() const { | 343 TimeDelta MediaSourceState::GetMaxBufferedDuration() const { |
332 TimeDelta max_duration; | 344 TimeDelta max_duration; |
333 | 345 |
334 if (audio_) | 346 for (const auto& it : audio_streams_) { |
335 max_duration = std::max(max_duration, audio_->GetBufferedDuration()); | 347 max_duration = std::max(max_duration, it.second->GetBufferedDuration()); |
348 } | |
336 | 349 |
337 if (video_) | 350 for (const auto& it : video_streams_) { |
338 max_duration = std::max(max_duration, video_->GetBufferedDuration()); | 351 max_duration = std::max(max_duration, it.second->GetBufferedDuration()); |
352 } | |
339 | 353 |
340 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); | 354 for (TextStreamMap::const_iterator itr = text_stream_map_.begin(); |
341 itr != text_stream_map_.end(); ++itr) { | 355 itr != text_stream_map_.end(); ++itr) { |
342 max_duration = std::max(max_duration, itr->second->GetBufferedDuration()); | 356 max_duration = std::max(max_duration, itr->second->GetBufferedDuration()); |
343 } | 357 } |
344 | 358 |
345 return max_duration; | 359 return max_duration; |
346 } | 360 } |
347 | 361 |
348 void MediaSourceState::StartReturningData() { | 362 void MediaSourceState::StartReturningData() { |
349 if (audio_) | 363 for (const auto& it : audio_streams_) { |
350 audio_->StartReturningData(); | 364 it.second->StartReturningData(); |
365 } | |
351 | 366 |
352 if (video_) | 367 for (const auto& it : video_streams_) { |
353 video_->StartReturningData(); | 368 it.second->StartReturningData(); |
369 } | |
354 | 370 |
355 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 371 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
356 itr != text_stream_map_.end(); ++itr) { | 372 itr != text_stream_map_.end(); ++itr) { |
357 itr->second->StartReturningData(); | 373 itr->second->StartReturningData(); |
358 } | 374 } |
359 } | 375 } |
360 | 376 |
361 void MediaSourceState::AbortReads() { | 377 void MediaSourceState::AbortReads() { |
362 if (audio_) | 378 for (const auto& it : audio_streams_) { |
363 audio_->AbortReads(); | 379 it.second->AbortReads(); |
380 } | |
364 | 381 |
365 if (video_) | 382 for (const auto& it : video_streams_) { |
366 video_->AbortReads(); | 383 it.second->AbortReads(); |
384 } | |
367 | 385 |
368 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 386 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
369 itr != text_stream_map_.end(); ++itr) { | 387 itr != text_stream_map_.end(); ++itr) { |
370 itr->second->AbortReads(); | 388 itr->second->AbortReads(); |
371 } | 389 } |
372 } | 390 } |
373 | 391 |
374 void MediaSourceState::Seek(TimeDelta seek_time) { | 392 void MediaSourceState::Seek(TimeDelta seek_time) { |
375 if (audio_) | 393 for (const auto& it : audio_streams_) { |
376 audio_->Seek(seek_time); | 394 it.second->Seek(seek_time); |
395 } | |
377 | 396 |
378 if (video_) | 397 for (const auto& it : video_streams_) { |
379 video_->Seek(seek_time); | 398 it.second->Seek(seek_time); |
399 } | |
380 | 400 |
381 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 401 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
382 itr != text_stream_map_.end(); ++itr) { | 402 itr != text_stream_map_.end(); ++itr) { |
383 itr->second->Seek(seek_time); | 403 itr->second->Seek(seek_time); |
384 } | 404 } |
385 } | 405 } |
386 | 406 |
387 void MediaSourceState::CompletePendingReadIfPossible() { | 407 void MediaSourceState::CompletePendingReadIfPossible() { |
388 if (audio_) | 408 for (const auto& it : audio_streams_) { |
389 audio_->CompletePendingReadIfPossible(); | 409 it.second->CompletePendingReadIfPossible(); |
410 } | |
390 | 411 |
391 if (video_) | 412 for (const auto& it : video_streams_) { |
392 video_->CompletePendingReadIfPossible(); | 413 it.second->CompletePendingReadIfPossible(); |
414 } | |
393 | 415 |
394 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 416 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
395 itr != text_stream_map_.end(); ++itr) { | 417 itr != text_stream_map_.end(); ++itr) { |
396 itr->second->CompletePendingReadIfPossible(); | 418 itr->second->CompletePendingReadIfPossible(); |
397 } | 419 } |
398 } | 420 } |
399 | 421 |
400 void MediaSourceState::OnSetDuration(TimeDelta duration) { | 422 void MediaSourceState::OnSetDuration(TimeDelta duration) { |
401 if (audio_) | 423 for (const auto& it : audio_streams_) { |
402 audio_->OnSetDuration(duration); | 424 it.second->OnSetDuration(duration); |
425 } | |
403 | 426 |
404 if (video_) | 427 for (const auto& it : video_streams_) { |
405 video_->OnSetDuration(duration); | 428 it.second->OnSetDuration(duration); |
429 } | |
406 | 430 |
407 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 431 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
408 itr != text_stream_map_.end(); ++itr) { | 432 itr != text_stream_map_.end(); ++itr) { |
409 itr->second->OnSetDuration(duration); | 433 itr->second->OnSetDuration(duration); |
410 } | 434 } |
411 } | 435 } |
412 | 436 |
413 void MediaSourceState::MarkEndOfStream() { | 437 void MediaSourceState::MarkEndOfStream() { |
414 if (audio_) | 438 for (const auto& it : audio_streams_) { |
415 audio_->MarkEndOfStream(); | 439 it.second->MarkEndOfStream(); |
440 } | |
416 | 441 |
417 if (video_) | 442 for (const auto& it : video_streams_) { |
418 video_->MarkEndOfStream(); | 443 it.second->MarkEndOfStream(); |
444 } | |
419 | 445 |
420 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 446 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
421 itr != text_stream_map_.end(); ++itr) { | 447 itr != text_stream_map_.end(); ++itr) { |
422 itr->second->MarkEndOfStream(); | 448 itr->second->MarkEndOfStream(); |
423 } | 449 } |
424 } | 450 } |
425 | 451 |
426 void MediaSourceState::UnmarkEndOfStream() { | 452 void MediaSourceState::UnmarkEndOfStream() { |
427 if (audio_) | 453 for (const auto& it : audio_streams_) { |
428 audio_->UnmarkEndOfStream(); | 454 it.second->UnmarkEndOfStream(); |
455 } | |
429 | 456 |
430 if (video_) | 457 for (const auto& it : video_streams_) { |
431 video_->UnmarkEndOfStream(); | 458 it.second->UnmarkEndOfStream(); |
459 } | |
432 | 460 |
433 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 461 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
434 itr != text_stream_map_.end(); ++itr) { | 462 itr != text_stream_map_.end(); ++itr) { |
435 itr->second->UnmarkEndOfStream(); | 463 itr->second->UnmarkEndOfStream(); |
436 } | 464 } |
437 } | 465 } |
438 | 466 |
439 void MediaSourceState::Shutdown() { | 467 void MediaSourceState::Shutdown() { |
440 if (audio_) | 468 for (const auto& it : audio_streams_) { |
441 audio_->Shutdown(); | 469 it.second->Shutdown(); |
470 } | |
442 | 471 |
443 if (video_) | 472 for (const auto& it : video_streams_) { |
444 video_->Shutdown(); | 473 it.second->Shutdown(); |
474 } | |
445 | 475 |
446 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 476 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
447 itr != text_stream_map_.end(); ++itr) { | 477 itr != text_stream_map_.end(); ++itr) { |
448 itr->second->Shutdown(); | 478 itr->second->Shutdown(); |
449 } | 479 } |
450 } | 480 } |
451 | 481 |
452 void MediaSourceState::SetMemoryLimits(DemuxerStream::Type type, | 482 void MediaSourceState::SetMemoryLimits(DemuxerStream::Type type, |
453 size_t memory_limit) { | 483 size_t memory_limit) { |
454 switch (type) { | 484 switch (type) { |
455 case DemuxerStream::AUDIO: | 485 case DemuxerStream::AUDIO: |
456 if (audio_) | 486 for (const auto& it : audio_streams_) { |
457 audio_->SetStreamMemoryLimit(memory_limit); | 487 it.second->SetStreamMemoryLimit(memory_limit); |
488 } | |
458 break; | 489 break; |
459 case DemuxerStream::VIDEO: | 490 case DemuxerStream::VIDEO: |
460 if (video_) | 491 for (const auto& it : video_streams_) { |
461 video_->SetStreamMemoryLimit(memory_limit); | 492 it.second->SetStreamMemoryLimit(memory_limit); |
493 } | |
462 break; | 494 break; |
463 case DemuxerStream::TEXT: | 495 case DemuxerStream::TEXT: |
464 for (TextStreamMap::iterator itr = text_stream_map_.begin(); | 496 for (TextStreamMap::iterator itr = text_stream_map_.begin(); |
465 itr != text_stream_map_.end(); ++itr) { | 497 itr != text_stream_map_.end(); ++itr) { |
466 itr->second->SetStreamMemoryLimit(memory_limit); | 498 itr->second->SetStreamMemoryLimit(memory_limit); |
467 } | 499 } |
468 break; | 500 break; |
469 case DemuxerStream::UNKNOWN: | 501 case DemuxerStream::UNKNOWN: |
470 case DemuxerStream::NUM_TYPES: | 502 case DemuxerStream::NUM_TYPES: |
471 NOTREACHED(); | 503 NOTREACHED(); |
472 break; | 504 break; |
473 } | 505 } |
474 } | 506 } |
475 | 507 |
476 bool MediaSourceState::IsSeekWaitingForData() const { | 508 bool MediaSourceState::IsSeekWaitingForData() const { |
477 if (audio_ && audio_->IsSeekWaitingForData()) | 509 for (const auto& it : audio_streams_) { |
478 return true; | 510 if (it.second->IsSeekWaitingForData()) |
511 return true; | |
512 } | |
479 | 513 |
480 if (video_ && video_->IsSeekWaitingForData()) | 514 for (const auto& it : video_streams_) { |
481 return true; | 515 if (it.second->IsSeekWaitingForData()) |
516 return true; | |
517 } | |
482 | 518 |
483 // NOTE: We are intentionally not checking the text tracks | 519 // NOTE: We are intentionally not checking the text tracks |
484 // because text tracks are discontinuous and may not have data | 520 // because text tracks are discontinuous and may not have data |
485 // for the seek position. This is ok and playback should not be | 521 // for the seek position. This is ok and playback should not be |
486 // stalled because we don't have cues. If cues, with timestamps after | 522 // stalled because we don't have cues. If cues, with timestamps after |
487 // the seek time, eventually arrive they will be delivered properly | 523 // the seek time, eventually arrive they will be delivered properly |
488 // in response to ChunkDemuxerStream::Read() calls. | 524 // in response to ChunkDemuxerStream::Read() calls. |
489 | 525 |
490 return false; | 526 return false; |
491 } | 527 } |
492 | 528 |
493 bool MediaSourceState::OnNewConfigs( | 529 bool MediaSourceState::OnNewConfigs( |
494 bool allow_audio, | 530 std::string expected_codecs, |
495 bool allow_video, | |
496 std::unique_ptr<MediaTracks> tracks, | 531 std::unique_ptr<MediaTracks> tracks, |
497 const StreamParser::TextTrackConfigMap& text_configs) { | 532 const StreamParser::TextTrackConfigMap& text_configs) { |
533 DCHECK(tracks.get()); | |
534 DVLOG(1) << __func__ << " expected_codecs=" << expected_codecs | |
chcunningham
2016/09/16 00:05:06
Do you need to pass in expected codecs here? You'v
servolk
2016/09/16 00:34:31
Yes, expected codecs is no longer used here, excep
| |
535 << " tracks=" << tracks->tracks().size(); | |
498 DCHECK_GE(state_, PENDING_PARSER_CONFIG); | 536 DCHECK_GE(state_, PENDING_PARSER_CONFIG); |
499 DCHECK(tracks.get()); | |
500 | 537 |
501 MediaTrack* audio_track = nullptr; | 538 // Check that there is no clashing bytestream track ids. |
502 MediaTrack* video_track = nullptr; | 539 if (!CheckBytestreamTrackIds(*tracks, text_configs)) { |
503 AudioDecoderConfig audio_config; | 540 MEDIA_LOG(ERROR, media_log_) << "Duplicate bytestream track ids detected"; |
504 VideoDecoderConfig video_config; | 541 for (const auto& track : tracks->tracks()) { |
542 const StreamParser::TrackId& track_id = track->bytestream_track_id(); | |
543 MEDIA_LOG(DEBUG, media_log_) << TrackTypeToStr(track->type()) << " track " | |
544 << " bytestream track id=" << track_id; | |
545 } | |
546 return false; | |
547 } | |
548 | |
549 // MSE spec allows new configs to be emitted only during Append, but not | |
550 // during Flush or parser reset operations. | |
551 CHECK(append_in_progress_); | |
552 | |
553 bool success = true; | |
554 | |
555 // TODO(wolenetz): Update codec string strictness, if necessary, once spec | |
556 // issue https://github.com/w3c/media-source/issues/161 is resolved. | |
557 std::vector<AudioCodec> expected_acodecs = expected_audio_codecs_; | |
558 std::vector<VideoCodec> expected_vcodecs = expected_video_codecs_; | |
559 | |
505 for (const auto& track : tracks->tracks()) { | 560 for (const auto& track : tracks->tracks()) { |
506 const auto& track_id = track->bytestream_track_id(); | 561 const auto& track_id = track->bytestream_track_id(); |
507 | 562 |
508 if (track->type() == MediaTrack::Audio) { | 563 if (track->type() == MediaTrack::Audio) { |
509 if (audio_track) { | 564 AudioDecoderConfig audio_config = tracks->getAudioConfig(track_id); |
510 MEDIA_LOG(ERROR, media_log_) | 565 DVLOG(1) << "Audio track_id=" << track_id |
511 << "Error: more than one audio track is currently not supported."; | 566 << " config: " << audio_config.AsHumanReadableString(); |
567 DCHECK(audio_config.IsValidConfig()); | |
568 | |
569 const auto& it = std::find(expected_acodecs.begin(), | |
570 expected_acodecs.end(), audio_config.codec()); | |
571 if (it == expected_acodecs.end()) { | |
572 MEDIA_LOG(ERROR, media_log_) << "Audio stream codec " | |
573 << GetCodecName(audio_config.codec()) | |
574 << " doesn't match SourceBuffer codecs."; | |
512 return false; | 575 return false; |
513 } | 576 } |
514 audio_track = track.get(); | 577 expected_acodecs.erase(it); |
515 audio_config = tracks->getAudioConfig(track_id); | 578 |
516 DCHECK(audio_config.IsValidConfig()); | 579 ChunkDemuxerStream* stream = nullptr; |
580 if (!first_init_segment_received_) { | |
581 DCHECK(audio_streams_.find(track_id) == audio_streams_.end()); | |
582 stream = create_demuxer_stream_cb_.Run(DemuxerStream::AUDIO); | |
583 if (!stream || !frame_processor_->AddTrack(track_id, stream)) { | |
584 MEDIA_LOG(ERROR, media_log_) << "Failed to create audio stream."; | |
585 return false; | |
586 } | |
587 audio_streams_[track_id] = stream; | |
588 media_log_->SetBooleanProperty("found_audio_stream", true); | |
589 media_log_->SetStringProperty("audio_codec_name", | |
590 GetCodecName(audio_config.codec())); | |
591 } else { | |
592 if (audio_streams_.size() > 1) { | |
chcunningham
2016/09/16 00:05:06
I think this core part of your change (adding mult
servolk
2016/09/16 00:34:31
I've added some new tests (including ones where we
chcunningham1
2016/09/16 16:49:19
Right, I see your todo. I think you can side step
servolk
2016/09/16 17:05:19
I think testing via chunk demuxer _is_ the best so
| |
593 stream = audio_streams_[track_id]; | |
594 } else { | |
595 // If there is only one audio track then bytestream id might change in | |
596 // a new init segment. So update our state and notify frame processor. | 
597 const auto& it = audio_streams_.begin(); | |
598 if (it != audio_streams_.end()) { | |
599 stream = it->second; | |
600 if (it->first != track_id) { | |
601 frame_processor_->UpdateTrack(it->first, track_id); | |
602 audio_streams_[track_id] = stream; | |
603 audio_streams_.erase(it->first); | |
604 } | |
605 } | |
606 } | |
607 if (!stream) { | |
608 MEDIA_LOG(ERROR, media_log_) << "Got unexpected audio track" | |
609 << " track_id=" << track_id; | |
610 return false; | |
611 } | |
612 } | |
613 | |
614 track->set_id(stream->media_track_id()); | |
615 frame_processor_->OnPossibleAudioConfigUpdate(audio_config); | |
616 success &= stream->UpdateAudioConfig(audio_config, media_log_); | |
517 } else if (track->type() == MediaTrack::Video) { | 617 } else if (track->type() == MediaTrack::Video) { |
518 if (video_track) { | 618 VideoDecoderConfig video_config = tracks->getVideoConfig(track_id); |
519 MEDIA_LOG(ERROR, media_log_) | 619 DVLOG(1) << "Video track_id=" << track_id |
520 << "Error: more than one video track is currently not supported."; | 620 << " config: " << video_config.AsHumanReadableString(); |
621 DCHECK(video_config.IsValidConfig()); | |
622 | |
623 const auto& it = std::find(expected_vcodecs.begin(), | |
624 expected_vcodecs.end(), video_config.codec()); | |
625 if (it == expected_vcodecs.end()) { | |
626 MEDIA_LOG(ERROR, media_log_) << "Video stream codec " | |
627 << GetCodecName(video_config.codec()) | |
628 << " doesn't match SourceBuffer codecs."; | |
521 return false; | 629 return false; |
522 } | 630 } |
523 video_track = track.get(); | 631 expected_vcodecs.erase(it); |
524 video_config = tracks->getVideoConfig(track_id); | 632 |
525 DCHECK(video_config.IsValidConfig()); | 633 ChunkDemuxerStream* stream = nullptr; |
634 if (!first_init_segment_received_) { | |
635 DCHECK(video_streams_.find(track_id) == video_streams_.end()); | |
636 stream = create_demuxer_stream_cb_.Run(DemuxerStream::VIDEO); | |
637 if (!stream || !frame_processor_->AddTrack(track_id, stream)) { | |
638 MEDIA_LOG(ERROR, media_log_) << "Failed to create video stream."; | |
639 return false; | |
640 } | |
641 video_streams_[track_id] = stream; | |
642 media_log_->SetBooleanProperty("found_video_stream", true); | |
643 media_log_->SetStringProperty("video_codec_name", | |
644 GetCodecName(video_config.codec())); | |
645 } else { | |
646 if (video_streams_.size() > 1) { | |
647 stream = video_streams_[track_id]; | |
648 } else { | |
649 // If there is only one video track then bytestream id might change in | |
650 // a new init segment. So update our state and notify frame processor. | 
651 const auto& it = video_streams_.begin(); | |
652 if (it != video_streams_.end()) { | |
653 stream = it->second; | |
654 if (it->first != track_id) { | |
655 frame_processor_->UpdateTrack(it->first, track_id); | |
656 video_streams_[track_id] = stream; | |
657 video_streams_.erase(it->first); | |
658 } | |
659 } | |
660 } | |
661 if (!stream) { | |
662 MEDIA_LOG(ERROR, media_log_) << "Got unexpected video track" | |
663 << " track_id=" << track_id; | |
664 return false; | |
665 } | |
666 } | |
667 | |
668 track->set_id(stream->media_track_id()); | |
669 success &= stream->UpdateVideoConfig(video_config, media_log_); | |
526 } else { | 670 } else { |
527 MEDIA_LOG(ERROR, media_log_) << "Error: unsupported media track type " | 671 MEDIA_LOG(ERROR, media_log_) << "Error: unsupported media track type " |
528 << track->type(); | 672 << track->type(); |
529 return false; | 673 return false; |
530 } | 674 } |
531 } | 675 } |
532 | 676 |
533 DVLOG(1) << "OnNewConfigs(" << allow_audio << ", " << allow_video << ", " | 677 if (!expected_acodecs.empty() || !expected_vcodecs.empty()) { |
534 << audio_config.IsValidConfig() << ", " | 678 for (const auto& acodec : expected_acodecs) { |
535 << video_config.IsValidConfig() << ")"; | 679 MEDIA_LOG(ERROR, media_log_) << "Initialization segment misses expected " |
536 // MSE spec allows new configs to be emitted only during Append, but not | 680 << GetCodecName(acodec) << " track."; |
537 // during Flush or parser reset operations. | 681 } |
538 CHECK(append_in_progress_); | 682 for (const auto& vcodec : expected_vcodecs) { |
539 | 683 MEDIA_LOG(ERROR, media_log_) << "Initialization segment misses expected " |
540 if (!audio_config.IsValidConfig() && !video_config.IsValidConfig()) { | 684 << GetCodecName(vcodec) << " track."; |
541 DVLOG(1) << "OnNewConfigs() : Audio & video config are not valid!"; | 685 } |
542 return false; | 686 return false; |
543 } | 687 } |
544 | 688 |
545 // Signal an error if we get configuration info for stream types that weren't | |
546 // specified in AddId() or more configs after a stream is initialized. | |
547 if (allow_audio != audio_config.IsValidConfig()) { | |
548 MEDIA_LOG(ERROR, media_log_) | |
549 << "Initialization segment" | |
550 << (audio_config.IsValidConfig() ? " has" : " does not have") | |
551 << " an audio track, but the mimetype" | |
552 << (allow_audio ? " specifies" : " does not specify") | |
553 << " an audio codec."; | |
554 return false; | |
555 } | |
556 | |
557 if (allow_video != video_config.IsValidConfig()) { | |
558 MEDIA_LOG(ERROR, media_log_) | |
559 << "Initialization segment" | |
560 << (video_config.IsValidConfig() ? " has" : " does not have") | |
561 << " a video track, but the mimetype" | |
562 << (allow_video ? " specifies" : " does not specify") | |
563 << " a video codec."; | |
564 return false; | |
565 } | |
566 | |
567 bool success = true; | |
568 if (audio_config.IsValidConfig()) { | |
569 if (!audio_) { | |
570 media_log_->SetBooleanProperty("found_audio_stream", true); | |
571 } | |
572 if (!audio_ || | |
573 audio_->audio_decoder_config().codec() != audio_config.codec()) { | |
574 media_log_->SetStringProperty("audio_codec_name", | |
575 GetCodecName(audio_config.codec())); | |
576 } | |
577 | |
578 bool audio_stream_just_created = false; | |
579 if (!audio_) { | |
580 audio_ = create_demuxer_stream_cb_.Run(DemuxerStream::AUDIO); | |
581 | |
582 if (!audio_) { | |
583 DVLOG(1) << "Failed to create an audio stream."; | |
584 return false; | |
585 } | |
586 audio_stream_just_created = true; | |
587 | |
588 if (!frame_processor_->AddTrack(FrameProcessor::kAudioTrackId, audio_)) { | |
589 DVLOG(1) << "Failed to add audio track to frame processor."; | |
590 return false; | |
591 } | |
592 } | |
593 | |
594 frame_processor_->OnPossibleAudioConfigUpdate(audio_config); | |
595 success &= audio_->UpdateAudioConfig(audio_config, media_log_); | |
596 | |
597 if (audio_stream_just_created) { | |
598 std::string audio_buf_limit_switch = | |
599 base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( | |
600 switches::kMSEAudioBufferSizeLimit); | |
601 unsigned audio_buf_size_limit = 0; | |
602 if (base::StringToUint(audio_buf_limit_switch, &audio_buf_size_limit) && | |
603 audio_buf_size_limit > 0) { | |
604 MEDIA_LOG(INFO, media_log_) << "Custom audio SourceBuffer size limit=" | |
605 << audio_buf_size_limit; | |
606 audio_->SetStreamMemoryLimit(audio_buf_size_limit); | |
607 } | |
608 } | |
609 } | |
610 | |
611 if (video_config.IsValidConfig()) { | |
612 if (!video_) { | |
613 media_log_->SetBooleanProperty("found_video_stream", true); | |
614 } | |
615 if (!video_ || | |
616 video_->video_decoder_config().codec() != video_config.codec()) { | |
617 media_log_->SetStringProperty("video_codec_name", | |
618 GetCodecName(video_config.codec())); | |
619 } | |
620 | |
621 bool video_stream_just_created = false; | |
622 if (!video_) { | |
623 video_ = create_demuxer_stream_cb_.Run(DemuxerStream::VIDEO); | |
624 | |
625 if (!video_) { | |
626 DVLOG(1) << "Failed to create a video stream."; | |
627 return false; | |
628 } | |
629 video_stream_just_created = true; | |
630 | |
631 if (!frame_processor_->AddTrack(FrameProcessor::kVideoTrackId, video_)) { | |
632 DVLOG(1) << "Failed to add video track to frame processor."; | |
633 return false; | |
634 } | |
635 } | |
636 | |
637 success &= video_->UpdateVideoConfig(video_config, media_log_); | |
638 | |
639 if (video_stream_just_created) { | |
640 std::string video_buf_limit_switch = | |
641 base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( | |
642 switches::kMSEVideoBufferSizeLimit); | |
643 unsigned video_buf_size_limit = 0; | |
644 if (base::StringToUint(video_buf_limit_switch, &video_buf_size_limit) && | |
645 video_buf_size_limit > 0) { | |
646 MEDIA_LOG(INFO, media_log_) << "Custom video SourceBuffer size limit=" | |
647 << video_buf_size_limit; | |
648 video_->SetStreamMemoryLimit(video_buf_size_limit); | |
649 } | |
650 } | |
651 } | |
652 | |
653 typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr; | 689 typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr; |
654 if (text_stream_map_.empty()) { | 690 if (text_stream_map_.empty()) { |
655 for (TextConfigItr itr = text_configs.begin(); itr != text_configs.end(); | 691 for (TextConfigItr itr = text_configs.begin(); itr != text_configs.end(); |
656 ++itr) { | 692 ++itr) { |
657 ChunkDemuxerStream* const text_stream = | 693 ChunkDemuxerStream* const text_stream = |
658 create_demuxer_stream_cb_.Run(DemuxerStream::TEXT); | 694 create_demuxer_stream_cb_.Run(DemuxerStream::TEXT); |
659 if (!frame_processor_->AddTrack(itr->first, text_stream)) { | 695 if (!frame_processor_->AddTrack(itr->first, text_stream)) { |
660 success &= false; | 696 success &= false; |
661 MEDIA_LOG(ERROR, media_log_) << "Failed to add text track ID " | 697 MEDIA_LOG(ERROR, media_log_) << "Failed to add text track ID " |
662 << itr->first << " to frame processor."; | 698 << itr->first << " to frame processor."; |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
718 success &= false; | 754 success &= false; |
719 MEDIA_LOG(ERROR, media_log_) << "New text track config for track ID " | 755 MEDIA_LOG(ERROR, media_log_) << "New text track config for track ID " |
720 << config_itr->first | 756 << config_itr->first |
721 << " does not match old one."; | 757 << " does not match old one."; |
722 break; | 758 break; |
723 } | 759 } |
724 } | 760 } |
725 } | 761 } |
726 } | 762 } |
727 | 763 |
764 if (audio_streams_.empty() && video_streams_.empty()) { | |
765 DVLOG(1) << __func__ << ": couldn't find a valid audio or video stream"; | |
766 return false; | |
767 } | |
768 | |
728 frame_processor_->SetAllTrackBuffersNeedRandomAccessPoint(); | 769 frame_processor_->SetAllTrackBuffersNeedRandomAccessPoint(); |
729 | 770 |
730 if (audio_track) { | 771 if (!first_init_segment_received_) { |
731 DCHECK(audio_); | 772 first_init_segment_received_ = true; |
732 audio_track->set_id(audio_->media_track_id()); | 773 SetStreamMemoryLimits(); |
733 } | |
734 if (video_track) { | |
735 DCHECK(video_); | |
736 video_track->set_id(video_->media_track_id()); | |
737 } | 774 } |
738 | 775 |
739 DVLOG(1) << "OnNewConfigs() : " << (success ? "success" : "failed"); | 776 DVLOG(1) << "OnNewConfigs() : " << (success ? "success" : "failed"); |
740 if (success) { | 777 if (success) { |
741 if (state_ == PENDING_PARSER_CONFIG) | 778 if (state_ == PENDING_PARSER_CONFIG) |
742 state_ = PENDING_PARSER_INIT; | 779 state_ = PENDING_PARSER_INIT; |
743 DCHECK(!init_segment_received_cb_.is_null()); | 780 DCHECK(!init_segment_received_cb_.is_null()); |
744 init_segment_received_cb_.Run(std::move(tracks)); | 781 init_segment_received_cb_.Run(std::move(tracks)); |
745 } | 782 } |
746 | 783 |
747 return success; | 784 return success; |
748 } | 785 } |
749 | 786 |
787 void MediaSourceState::SetStreamMemoryLimits() { | |
788 auto cmd_line = base::CommandLine::ForCurrentProcess(); | |
789 | |
790 std::string audio_buf_limit_switch = | |
791 cmd_line->GetSwitchValueASCII(switches::kMSEAudioBufferSizeLimit); | |
792 unsigned audio_buf_size_limit = 0; | |
793 if (base::StringToUint(audio_buf_limit_switch, &audio_buf_size_limit) && | |
794 audio_buf_size_limit > 0) { | |
795 MEDIA_LOG(INFO, media_log_) | |
796 << "Custom audio per-track SourceBuffer size limit=" | |
797 << audio_buf_size_limit; | |
798 for (const auto& it : audio_streams_) { | |
799 it.second->SetStreamMemoryLimit(audio_buf_size_limit); | |
800 } | |
801 } | |
802 | |
803 std::string video_buf_limit_switch = | |
804 cmd_line->GetSwitchValueASCII(switches::kMSEVideoBufferSizeLimit); | |
805 unsigned video_buf_size_limit = 0; | |
806 if (base::StringToUint(video_buf_limit_switch, &video_buf_size_limit) && | |
807 video_buf_size_limit > 0) { | |
808 MEDIA_LOG(INFO, media_log_) | |
809 << "Custom video per-track SourceBuffer size limit=" | |
810 << video_buf_size_limit; | |
811 for (const auto& it : video_streams_) { | |
812 it.second->SetStreamMemoryLimit(video_buf_size_limit); | |
813 } | |
814 } | |
815 } | |
816 | |
750 void MediaSourceState::OnNewMediaSegment() { | 817 void MediaSourceState::OnNewMediaSegment() { |
751 DVLOG(2) << "OnNewMediaSegment()"; | 818 DVLOG(2) << "OnNewMediaSegment()"; |
752 DCHECK_EQ(state_, PARSER_INITIALIZED); | 819 DCHECK_EQ(state_, PARSER_INITIALIZED); |
753 parsing_media_segment_ = true; | 820 parsing_media_segment_ = true; |
754 media_segment_contained_audio_frame_ = false; | 821 media_segment_has_data_for_track_.clear(); |
755 media_segment_contained_video_frame_ = false; | |
756 } | 822 } |
757 | 823 |
758 void MediaSourceState::OnEndOfMediaSegment() { | 824 void MediaSourceState::OnEndOfMediaSegment() { |
759 DVLOG(2) << "OnEndOfMediaSegment()"; | 825 DVLOG(2) << "OnEndOfMediaSegment()"; |
760 DCHECK_EQ(state_, PARSER_INITIALIZED); | 826 DCHECK_EQ(state_, PARSER_INITIALIZED); |
761 parsing_media_segment_ = false; | 827 parsing_media_segment_ = false; |
762 | 828 |
763 const bool missing_audio = audio_ && !media_segment_contained_audio_frame_; | 829 for (const auto& it : audio_streams_) { |
764 const bool missing_video = video_ && !media_segment_contained_video_frame_; | 830 if (!media_segment_has_data_for_track_[it.first]) { |
765 if (!missing_audio && !missing_video) | 831 LIMITED_MEDIA_LOG(DEBUG, media_log_, num_missing_track_logs_, |
766 return; | 832 kMaxMissingTrackInSegmentLogs) |
767 | 833 << "Media segment did not contain any coded frames for track " |
768 LIMITED_MEDIA_LOG(DEBUG, media_log_, num_missing_track_logs_, | 834 << it.first << ", mismatching initialization segment. Therefore, MSE" |
chcunningham
2016/09/16 00:05:06
I don't follow the "mismatching initialization seg
servolk
2016/09/16 00:34:31
This was added by Matt. IIUC the logic here is tha
chcunningham1
2016/09/16 16:49:19
Acknowledged.
| |
769 kMaxMissingTrackInSegmentLogs) | 835 " coded frame processing may not interoperably detect" |
770 << "Media segment did not contain any " | 836 " discontinuities in appended media."; |
771 << (missing_audio && missing_video ? "audio or video" | 837 } |
772 : missing_audio ? "audio" : "video") | 838 } |
773 << " coded frames, mismatching initialization segment. Therefore, MSE " | 839 for (const auto& it : video_streams_) { |
774 "coded frame processing may not interoperably detect discontinuities " | 840 if (!media_segment_has_data_for_track_[it.first]) { |
775 "in appended media."; | 841 LIMITED_MEDIA_LOG(DEBUG, media_log_, num_missing_track_logs_, |
842 kMaxMissingTrackInSegmentLogs) | |
843 << "Media segment did not contain any coded frames for track " | |
844 << it.first << ", mismatching initialization segment. Therefore, MSE" | |
845 " coded frame processing may not interoperably detect" | |
846 " discontinuities in appended media."; | |
847 } | |
848 } | |
776 } | 849 } |
777 | 850 |
778 bool MediaSourceState::OnNewBuffers( | 851 bool MediaSourceState::OnNewBuffers( |
779 const StreamParser::BufferQueueMap& buffer_queue_map) { | 852 const StreamParser::BufferQueueMap& buffer_queue_map) { |
780 DVLOG(2) << "OnNewBuffers()"; | 853 DVLOG(2) << __func__ << " buffer_queues=" << buffer_queue_map.size(); |
781 DCHECK_EQ(state_, PARSER_INITIALIZED); | 854 DCHECK_EQ(state_, PARSER_INITIALIZED); |
782 DCHECK(timestamp_offset_during_append_); | 855 DCHECK(timestamp_offset_during_append_); |
783 DCHECK(parsing_media_segment_); | 856 DCHECK(parsing_media_segment_); |
784 | 857 |
785 for (const auto& it : buffer_queue_map) { | 858 for (const auto& it : buffer_queue_map) { |
786 const StreamParser::BufferQueue& bufq = it.second; | 859 const StreamParser::BufferQueue& bufq = it.second; |
787 DCHECK(!bufq.empty()); | 860 DCHECK(!bufq.empty()); |
788 if (bufq[0]->type() == DemuxerStream::AUDIO) { | 861 media_segment_has_data_for_track_[it.first] = true; |
789 media_segment_contained_audio_frame_ = true; | |
790 } else if (bufq[0]->type() == DemuxerStream::VIDEO) { | |
791 media_segment_contained_video_frame_ = true; | |
792 } | |
793 } | 862 } |
794 | 863 |
795 const TimeDelta timestamp_offset_before_processing = | 864 const TimeDelta timestamp_offset_before_processing = |
796 *timestamp_offset_during_append_; | 865 *timestamp_offset_during_append_; |
797 | 866 |
798 // Calculate the new timestamp offset for audio/video tracks if the stream | 867 // Calculate the new timestamp offset for audio/video tracks if the stream |
799 // parser has requested automatic updates. | 868 // parser has requested automatic updates. |
800 TimeDelta new_timestamp_offset = timestamp_offset_before_processing; | 869 TimeDelta new_timestamp_offset = timestamp_offset_before_processing; |
801 if (auto_update_timestamp_offset_) { | 870 if (auto_update_timestamp_offset_) { |
802 TimeDelta min_end_timestamp = kNoTimestamp; | 871 TimeDelta min_end_timestamp = kNoTimestamp; |
(...skipping 17 matching lines...) Expand all Loading... | |
820 } | 889 } |
821 | 890 |
822 // Only update the timestamp offset if the frame processor hasn't already. | 891 // Only update the timestamp offset if the frame processor hasn't already. |
823 if (auto_update_timestamp_offset_ && | 892 if (auto_update_timestamp_offset_ && |
824 timestamp_offset_before_processing == *timestamp_offset_during_append_) { | 893 timestamp_offset_before_processing == *timestamp_offset_during_append_) { |
825 *timestamp_offset_during_append_ = new_timestamp_offset; | 894 *timestamp_offset_during_append_ = new_timestamp_offset; |
826 } | 895 } |
827 | 896 |
828 return true; | 897 return true; |
829 } | 898 } |
830 | |
831 void MediaSourceState::OnSourceInitDone( | 899 void MediaSourceState::OnSourceInitDone( |
832 const StreamParser::InitParameters& params) { | 900 const StreamParser::InitParameters& params) { |
833 DCHECK_EQ(state_, PENDING_PARSER_INIT); | 901 DCHECK_EQ(state_, PENDING_PARSER_INIT); |
834 state_ = PARSER_INITIALIZED; | 902 state_ = PARSER_INITIALIZED; |
835 auto_update_timestamp_offset_ = params.auto_update_timestamp_offset; | 903 auto_update_timestamp_offset_ = params.auto_update_timestamp_offset; |
836 base::ResetAndReturn(&init_cb_).Run(params); | 904 base::ResetAndReturn(&init_cb_).Run(params); |
837 } | 905 } |
838 | 906 |
839 } // namespace media | 907 } // namespace media |
OLD | NEW |