Chromium Code Reviews

Side by Side Diff: media/filters/ffmpeg_demuxer.cc

Issue 2284923003: Implement support for multiple tracks in FFmpegDemuxer (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: rebase Created 4 years, 3 months ago
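
Context for the diff below: the patch moves FFmpegDemuxer from exposing a single audio and a single video stream to keeping one demuxer stream per container track, mapping MediaTrack IDs to those streams, and toggling a per-stream enabled flag when the selected tracks change (see OnEnabledAudioTracksChanged() and OnSelectedVideoTrackChanged() in the diff). What follows is a minimal standalone sketch of that pattern only; it is not the Chromium code, and every type, function, and track name in it is illustrative.

#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>

enum class StreamType { kAudio, kVideo };

// Stand-in for FFmpegDemuxerStream: it stays alive for the whole playback and
// is merely switched on or off when the selected tracks change.
struct DemuxStream {
  StreamType type = StreamType::kAudio;
  std::string label;
  bool enabled = true;
};

class TinyDemuxer {
 public:
  // One owned stream per container track, analogous to the patch keeping a
  // vector of owned FFmpegDemuxerStream objects plus a track-id -> stream map.
  std::string AddStream(StreamType type, const std::string& label) {
    auto stream = std::make_unique<DemuxStream>();
    stream->type = type;
    stream->label = label;
    const std::string id = "track" + std::to_string(streams_.size() + 1);
    track_id_to_stream_[id] = stream.get();
    streams_.push_back(std::move(stream));
    return id;
  }

  // Analogous to OnEnabledAudioTracksChanged(): disable every audio stream
  // that is not in |enabled_ids|, enable the ones that are.
  void SetEnabledAudioTracks(const std::vector<std::string>& enabled_ids) {
    std::set<DemuxStream*> enabled;
    for (const auto& id : enabled_ids)
      enabled.insert(track_id_to_stream_.at(id));
    for (const auto& stream : streams_) {
      if (stream->type == StreamType::kAudio)
        stream->enabled = enabled.count(stream.get()) > 0;
    }
  }

  void Dump() const {
    for (const auto& stream : streams_)
      std::cout << stream->label << ": " << (stream->enabled ? "on" : "off") << "\n";
  }

 private:
  std::vector<std::unique_ptr<DemuxStream>> streams_;
  std::map<std::string, DemuxStream*> track_id_to_stream_;
};

int main() {
  TinyDemuxer demuxer;
  const std::string english = demuxer.AddStream(StreamType::kAudio, "audio (en)");
  const std::string french = demuxer.AddStream(StreamType::kAudio, "audio (fr)");
  demuxer.AddStream(StreamType::kVideo, "video");

  demuxer.SetEnabledAudioTracks({english});  // default selection
  demuxer.SetEnabledAudioTracks({french});   // user switches to the French track
  demuxer.Dump();
  return 0;
}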
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/filters/ffmpeg_demuxer.h" 5 #include "media/filters/ffmpeg_demuxer.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <memory> 8 #include <memory>
9 #include <utility> 9 #include <utility>
10 10
(...skipping 881 matching lines...)
892 DCHECK(task_runner_->BelongsToCurrentThread()); 892 DCHECK(task_runner_->BelongsToCurrentThread());
893 893
894 // If Stop() has been called, then drop this call. 894 // If Stop() has been called, then drop this call.
895 if (!blocking_thread_.IsRunning()) 895 if (!blocking_thread_.IsRunning())
896 return; 896 return;
897 897
898 // This should only be called after the demuxer has been initialized. 898 // This should only be called after the demuxer has been initialized.
899 DCHECK_GT(streams_.size(), 0u); 899 DCHECK_GT(streams_.size(), 0u);
900 900
901 // Abort all outstanding reads. 901 // Abort all outstanding reads.
902 for (auto* stream : streams_) { 902 for (const auto& stream : streams_) {
903 if (stream) 903 if (stream)
904 stream->Abort(); 904 stream->Abort();
905 } 905 }
906 906
907 // It's important to invalidate read/seek completion callbacks to avoid any 907 // It's important to invalidate read/seek completion callbacks to avoid any
908 // errors that occur because of the data source abort. 908 // errors that occur because of the data source abort.
909 weak_factory_.InvalidateWeakPtrs(); 909 weak_factory_.InvalidateWeakPtrs();
910 data_source_->Abort(); 910 data_source_->Abort();
911 911
912 // Aborting the read may cause EOF to be marked, undo this. 912 // Aborting the read may cause EOF to be marked, undo this.
(...skipping 16 matching lines...)
929 // in the process of being fulfilled by the DataSource. 929 // in the process of being fulfilled by the DataSource.
930 data_source_->Stop(); 930 data_source_->Stop();
931 url_protocol_->Abort(); 931 url_protocol_->Abort();
932 932
933 // This will block until all tasks complete. Note that after this returns it's 933 // This will block until all tasks complete. Note that after this returns it's
934 // possible for reply tasks (e.g., OnReadFrameDone()) to be queued on this 934 // possible for reply tasks (e.g., OnReadFrameDone()) to be queued on this
935 // thread. Each of the reply task methods must check whether we've stopped the 935 // thread. Each of the reply task methods must check whether we've stopped the
936 // thread and drop their results on the floor. 936 // thread and drop their results on the floor.
937 blocking_thread_.Stop(); 937 blocking_thread_.Stop();
938 938
939 StreamVector::iterator iter; 939 for (const auto& stream : streams_) {
940 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { 940 if (stream)
941 if (*iter) 941 stream->Stop();
942 (*iter)->Stop();
943 } 942 }
944 943
945 data_source_ = NULL; 944 data_source_ = NULL;
946 945
947 // Invalidate WeakPtrs on |task_runner_|, destruction may happen on another 946 // Invalidate WeakPtrs on |task_runner_|, destruction may happen on another
948 // thread. 947 // thread.
949 weak_factory_.InvalidateWeakPtrs(); 948 weak_factory_.InvalidateWeakPtrs();
950 } 949 }
951 950
952 void FFmpegDemuxer::StartWaitingForSeek(base::TimeDelta seek_time) {} 951 void FFmpegDemuxer::StartWaitingForSeek(base::TimeDelta seek_time) {}
(...skipping 61 matching lines...)
1014 return timeline_offset_; 1013 return timeline_offset_;
1015 } 1014 }
1016 1015
1017 DemuxerStream* FFmpegDemuxer::GetStream(DemuxerStream::Type type) { 1016 DemuxerStream* FFmpegDemuxer::GetStream(DemuxerStream::Type type) {
1018 DCHECK(task_runner_->BelongsToCurrentThread()); 1017 DCHECK(task_runner_->BelongsToCurrentThread());
1019 return GetFFmpegStream(type); 1018 return GetFFmpegStream(type);
1020 } 1019 }
1021 1020
1022 FFmpegDemuxerStream* FFmpegDemuxer::GetFFmpegStream( 1021 FFmpegDemuxerStream* FFmpegDemuxer::GetFFmpegStream(
1023 DemuxerStream::Type type) const { 1022 DemuxerStream::Type type) const {
1024 StreamVector::const_iterator iter; 1023 for (const auto& stream : streams_) {
1025 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { 1024 if (stream && stream->type() == type && stream->enabled()) {
1026 if (*iter && (*iter)->type() == type) { 1025 return stream.get();
1027 return *iter;
1028 } 1026 }
1029 } 1027 }
1030 return NULL; 1028 return NULL;
1031 } 1029 }
1032 1030
1033 base::TimeDelta FFmpegDemuxer::GetStartTime() const { 1031 base::TimeDelta FFmpegDemuxer::GetStartTime() const {
1034 return std::max(start_time_, base::TimeDelta()); 1032 return std::max(start_time_, base::TimeDelta());
1035 } 1033 }
1036 1034
1037 void FFmpegDemuxer::AddTextStreams() { 1035 void FFmpegDemuxer::AddTextStreams() {
1038 DCHECK(task_runner_->BelongsToCurrentThread()); 1036 DCHECK(task_runner_->BelongsToCurrentThread());
1039 1037
1040 for (StreamVector::size_type idx = 0; idx < streams_.size(); ++idx) { 1038 for (const auto& stream : streams_) {
1041 FFmpegDemuxerStream* stream = streams_[idx]; 1039 if (!stream || stream->type() != DemuxerStream::TEXT)
1042 if (stream == NULL || stream->type() != DemuxerStream::TEXT)
1043 continue; 1040 continue;
1044 1041
1045 TextKind kind = stream->GetTextKind(); 1042 TextKind kind = stream->GetTextKind();
1046 std::string title = stream->GetMetadata("title"); 1043 std::string title = stream->GetMetadata("title");
1047 std::string language = stream->GetMetadata("language"); 1044 std::string language = stream->GetMetadata("language");
1048 1045
1049 // TODO: Implement "id" metadata in FFMPEG. 1046 // TODO: Implement "id" metadata in FFMPEG.
1050 // See: http://crbug.com/323183 1047 // See: http://crbug.com/323183
1051 host_->AddTextStream(stream, TextTrackConfig(kind, title, language, 1048 host_->AddTextStream(stream.get(),
1052 std::string())); 1049 TextTrackConfig(kind, title, language, std::string()));
1053 } 1050 }
1054 } 1051 }
1055 1052
1056 int64_t FFmpegDemuxer::GetMemoryUsage() const { 1053 int64_t FFmpegDemuxer::GetMemoryUsage() const {
1057 int64_t allocation_size = 0; 1054 int64_t allocation_size = 0;
1058 for (auto* stream : streams_) { 1055 for (const auto& stream : streams_) {
1059 if (stream) 1056 if (stream)
1060 allocation_size += stream->MemoryUsage(); 1057 allocation_size += stream->MemoryUsage();
1061 } 1058 }
1062 return allocation_size; 1059 return allocation_size;
1063 } 1060 }
1064 1061
1065 void FFmpegDemuxer::OnEncryptedMediaInitData( 1062 void FFmpegDemuxer::OnEncryptedMediaInitData(
1066 EmeInitDataType init_data_type, 1063 EmeInitDataType init_data_type,
1067 const std::string& encryption_key_id) { 1064 const std::string& encryption_key_id) {
1068 std::vector<uint8_t> key_id_local(encryption_key_id.begin(), 1065 std::vector<uint8_t> key_id_local(encryption_key_id.begin(),
(...skipping 129 matching lines...)
1198 const base::TimeDelta packet_pts = 1195 const base::TimeDelta packet_pts =
1199 ConvertFromTimeBase(stream->time_base, packet_buffer->pkt.pts); 1196 ConvertFromTimeBase(stream->time_base, packet_buffer->pkt.pts);
1200 if (packet_pts < start_time_estimates[stream->index]) 1197 if (packet_pts < start_time_estimates[stream->index])
1201 start_time_estimates[stream->index] = packet_pts; 1198 start_time_estimates[stream->index] = packet_pts;
1202 } 1199 }
1203 packet_buffer = packet_buffer->next; 1200 packet_buffer = packet_buffer->next;
1204 } 1201 }
1205 } 1202 }
1206 1203
1207 std::unique_ptr<MediaTracks> media_tracks(new MediaTracks()); 1204 std::unique_ptr<MediaTracks> media_tracks(new MediaTracks());
1208 AVStream* audio_stream = NULL;
1209 AudioDecoderConfig audio_config;
1210 AVStream* video_stream = NULL;
1211 VideoDecoderConfig video_config;
1212 1205
1213 DCHECK(track_id_to_demux_stream_map_.empty()); 1206 DCHECK(track_id_to_demux_stream_map_.empty());
1214 1207
1215 // If available, |start_time_| will be set to the lowest stream start time. 1208 // If available, |start_time_| will be set to the lowest stream start time.
1216 start_time_ = kInfiniteDuration; 1209 start_time_ = kInfiniteDuration;
1217 1210
1218 base::TimeDelta max_duration; 1211 base::TimeDelta max_duration;
1219 int detected_audio_track_count = 0; 1212 int detected_audio_track_count = 0;
1220 int detected_video_track_count = 0; 1213 int detected_video_track_count = 0;
1221 int detected_text_track_count = 0; 1214 int detected_text_track_count = 0;
1222 for (size_t i = 0; i < format_context->nb_streams; ++i) { 1215 for (size_t i = 0; i < format_context->nb_streams; ++i) {
1223 AVStream* stream = format_context->streams[i]; 1216 AVStream* stream = format_context->streams[i];
1224 const AVCodecContext* codec_context = stream->codec; 1217 const AVCodecContext* codec_context = stream->codec;
1225 const AVMediaType codec_type = codec_context->codec_type; 1218 const AVMediaType codec_type = codec_context->codec_type;
1226 1219
1227 if (codec_type == AVMEDIA_TYPE_AUDIO) { 1220 if (codec_type == AVMEDIA_TYPE_AUDIO) {
1228 // Log the codec detected, whether it is supported or not, and whether or 1221 // Log the codec detected, whether it is supported or not, and whether or
1229 // not we have already detected a supported codec in another stream. 1222 // not we have already detected a supported codec in another stream.
1230 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodecHash", 1223 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodecHash",
1231 HashCodecName(GetCodecName(codec_context))); 1224 HashCodecName(GetCodecName(codec_context)));
1232 detected_audio_track_count++; 1225 detected_audio_track_count++;
1233
1234 if (audio_stream) {
1235 MEDIA_LOG(INFO, media_log_) << GetDisplayName()
1236 << ": skipping extra audio track";
1237 continue;
1238 }
1239 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { 1226 } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
1240 // Log the codec detected, whether it is supported or not, and whether or 1227 // Log the codec detected, whether it is supported or not, and whether or
1241 // not we have already detected a supported codec in another stream. 1228 // not we have already detected a supported codec in another stream.
1242 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodecHash", 1229 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodecHash",
1243 HashCodecName(GetCodecName(codec_context))); 1230 HashCodecName(GetCodecName(codec_context)));
1244 detected_video_track_count++; 1231 detected_video_track_count++;
1245 1232
1246 if (video_stream) {
1247 MEDIA_LOG(INFO, media_log_) << GetDisplayName()
1248 << ": skipping extra video track";
1249 continue;
1250 }
1251
1252 #if BUILDFLAG(ENABLE_HEVC_DEMUXING) 1233 #if BUILDFLAG(ENABLE_HEVC_DEMUXING)
1253 if (stream->codec->codec_id == AV_CODEC_ID_HEVC) { 1234 if (stream->codec->codec_id == AV_CODEC_ID_HEVC) {
1254 // If ffmpeg is built without HEVC parser/decoder support, it will be 1235 // If ffmpeg is built without HEVC parser/decoder support, it will be
1255 // able to demux HEVC based solely on container-provided information, 1236 // able to demux HEVC based solely on container-provided information,
1256 // but unable to get some of the parameters without parsing the stream 1237 // but unable to get some of the parameters without parsing the stream
1257 // (e.g. coded size needs to be read from SPS, pixel format is typically 1238 // (e.g. coded size needs to be read from SPS, pixel format is typically
1258 // deduced from decoder config in hvcC box). These are not really needed 1239 // deduced from decoder config in hvcC box). These are not really needed
1259 // when using external decoder (e.g. hardware decoder), so override them 1240 // when using external decoder (e.g. hardware decoder), so override them
1260 // here, to make sure this translates into a valid VideoDecoderConfig. 1241 // here, to make sure this translates into a valid VideoDecoderConfig.
1261 if (stream->codec->coded_width == 0 && 1242 if (stream->codec->coded_width == 0 &&
(...skipping 16 matching lines...)
1278 } else { 1259 } else {
1279 continue; 1260 continue;
1280 } 1261 }
1281 1262
1282 // Attempt to create a FFmpegDemuxerStream from the AVStream. This will 1263 // Attempt to create a FFmpegDemuxerStream from the AVStream. This will
1283 // return nullptr if the AVStream is invalid. Validity checks will verify 1264 // return nullptr if the AVStream is invalid. Validity checks will verify
1284 // things like: codec, channel layout, sample/pixel format, etc... 1265 // things like: codec, channel layout, sample/pixel format, etc...
1285 std::unique_ptr<FFmpegDemuxerStream> demuxer_stream = 1266 std::unique_ptr<FFmpegDemuxerStream> demuxer_stream =
1286 FFmpegDemuxerStream::Create(this, stream, media_log_); 1267 FFmpegDemuxerStream::Create(this, stream, media_log_);
1287 if (demuxer_stream.get()) { 1268 if (demuxer_stream.get()) {
1288 streams_[i] = demuxer_stream.release(); 1269 streams_[i] = std::move(demuxer_stream);
1289 } else { 1270 } else {
1290 if (codec_type == AVMEDIA_TYPE_AUDIO) { 1271 if (codec_type == AVMEDIA_TYPE_AUDIO) {
1291 MEDIA_LOG(INFO, media_log_) 1272 MEDIA_LOG(INFO, media_log_)
1292 << GetDisplayName() 1273 << GetDisplayName()
1293 << ": skipping invalid or unsupported audio track"; 1274 << ": skipping invalid or unsupported audio track";
1294 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { 1275 } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
1295 MEDIA_LOG(INFO, media_log_) 1276 MEDIA_LOG(INFO, media_log_)
1296 << GetDisplayName() 1277 << GetDisplayName()
1297 << ": skipping invalid or unsupported video track"; 1278 << ": skipping invalid or unsupported video track";
1298 } 1279 }
(...skipping 13 matching lines...)
1312 // Need to fix that and use it as track id. crbug.com/323183 1293 // Need to fix that and use it as track id. crbug.com/323183
1313 track_id = 1294 track_id =
1314 static_cast<StreamParser::TrackId>(media_tracks->tracks().size() + 1); 1295 static_cast<StreamParser::TrackId>(media_tracks->tracks().size() + 1);
1315 track_label = streams_[i]->GetMetadata("title"); 1296 track_label = streams_[i]->GetMetadata("title");
1316 } 1297 }
1317 1298
1318 // Note when we find our audio/video stream (we only want one of each) and 1299 // Note when we find our audio/video stream (we only want one of each) and
1319 // record src= playback UMA stats for the stream's decoder config. 1300 // record src= playback UMA stats for the stream's decoder config.
1320 MediaTrack* media_track = nullptr; 1301 MediaTrack* media_track = nullptr;
1321 if (codec_type == AVMEDIA_TYPE_AUDIO) { 1302 if (codec_type == AVMEDIA_TYPE_AUDIO) {
1322 CHECK(!audio_stream); 1303 AudioDecoderConfig audio_config = streams_[i]->audio_decoder_config();
1323 audio_stream = stream;
1324 audio_config = streams_[i]->audio_decoder_config();
1325 RecordAudioCodecStats(audio_config); 1304 RecordAudioCodecStats(audio_config);
1326 1305
1327 media_track = media_tracks->AddAudioTrack(audio_config, track_id, "main", 1306 media_track = media_tracks->AddAudioTrack(audio_config, track_id, "main",
1328 track_label, track_language); 1307 track_label, track_language);
1329 media_track->set_id(base::UintToString(track_id)); 1308 media_track->set_id(base::UintToString(track_id));
1330 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) == 1309 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) ==
1331 track_id_to_demux_stream_map_.end()); 1310 track_id_to_demux_stream_map_.end());
1332 track_id_to_demux_stream_map_[media_track->id()] = streams_[i]; 1311 track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get();
1333 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { 1312 } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
1334 CHECK(!video_stream); 1313 VideoDecoderConfig video_config = streams_[i]->video_decoder_config();
1335 video_stream = stream;
1336 video_config = streams_[i]->video_decoder_config();
1337 1314
1338 RecordVideoCodecStats(video_config, stream->codec->color_range, 1315 RecordVideoCodecStats(video_config, stream->codec->color_range,
1339 media_log_.get()); 1316 media_log_.get());
1340 1317
1341 media_track = media_tracks->AddVideoTrack(video_config, track_id, "main", 1318 media_track = media_tracks->AddVideoTrack(video_config, track_id, "main",
1342 track_label, track_language); 1319 track_label, track_language);
1343 media_track->set_id(base::UintToString(track_id)); 1320 media_track->set_id(base::UintToString(track_id));
1344 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) == 1321 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) ==
1345 track_id_to_demux_stream_map_.end()); 1322 track_id_to_demux_stream_map_.end());
1346 track_id_to_demux_stream_map_[media_track->id()] = streams_[i]; 1323 track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get();
1347 } 1324 }
1348 1325
1349 max_duration = std::max(max_duration, streams_[i]->duration()); 1326 max_duration = std::max(max_duration, streams_[i]->duration());
1350 1327
1351 const base::TimeDelta start_time = 1328 const base::TimeDelta start_time =
1352 ExtractStartTime(stream, start_time_estimates[i]); 1329 ExtractStartTime(stream, start_time_estimates[i]);
1353 const bool has_start_time = start_time != kNoTimestamp; 1330 const bool has_start_time = start_time != kNoTimestamp;
1354 1331
1355 if (!has_start_time) 1332 if (!has_start_time)
1356 continue; 1333 continue;
1357 1334
1358 streams_[i]->set_start_time(start_time); 1335 streams_[i]->set_start_time(start_time);
1359 if (start_time < start_time_) { 1336 if (start_time < start_time_) {
1360 start_time_ = start_time; 1337 start_time_ = start_time;
1361 } 1338 }
1362 } 1339 }
1363 1340
1364 RecordDetectedTrackTypeStats(detected_audio_track_count, 1341 RecordDetectedTrackTypeStats(detected_audio_track_count,
1365 detected_video_track_count, 1342 detected_video_track_count,
1366 detected_text_track_count); 1343 detected_text_track_count);
1367 1344
1368 if (!audio_stream && !video_stream) { 1345 if (media_tracks->tracks().empty()) {
1369 MEDIA_LOG(ERROR, media_log_) << GetDisplayName() 1346 MEDIA_LOG(ERROR, media_log_) << GetDisplayName()
1370 << ": no supported streams"; 1347 << ": no supported streams";
1371 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); 1348 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
1372 return; 1349 return;
1373 } 1350 }
1374 1351
1375 if (text_enabled_) 1352 if (text_enabled_)
1376 AddTextStreams(); 1353 AddTextStreams();
1377 1354
1378 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { 1355 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) {
(...skipping 17 matching lines...)
1396 // not be exposed to negative timestamps. Which means we need to rebase these 1373 // not be exposed to negative timestamps. Which means we need to rebase these
1397 // negative timestamps and mark them for discard post decoding. 1374 // negative timestamps and mark them for discard post decoding.
1398 // 1375 //
1399 // Post-decode frame dropping for packets with negative timestamps is outlined 1376 // Post-decode frame dropping for packets with negative timestamps is outlined
1400 // in section A.2 in the Ogg Vorbis spec: 1377 // in section A.2 in the Ogg Vorbis spec:
1401 // http://xiph.org/vorbis/doc/Vorbis_I_spec.html 1378 // http://xiph.org/vorbis/doc/Vorbis_I_spec.html
1402 // 1379 //
1403 // FFmpeg's use of negative timestamps for opus pre-skip is nonstandard, but 1380 // FFmpeg's use of negative timestamps for opus pre-skip is nonstandard, but
1404 // for more information on pre-skip see section 4.2 of the Ogg Opus spec: 1381 // for more information on pre-skip see section 4.2 of the Ogg Opus spec:
1405 // https://tools.ietf.org/html/draft-ietf-codec-oggopus-08#section-4.2 1382 // https://tools.ietf.org/html/draft-ietf-codec-oggopus-08#section-4.2
1406 if (audio_stream && (audio_stream->codec->codec_id == AV_CODEC_ID_OPUS || 1383 for (const auto& stream : streams_) {
1407 (strcmp(format_context->iformat->name, "ogg") == 0 && 1384 if (!stream || stream->type() != DemuxerStream::AUDIO)
1408 audio_stream->codec->codec_id == AV_CODEC_ID_VORBIS))) { 1385 continue;
1409 for (size_t i = 0; i < streams_.size(); ++i) { 1386 const AVStream* audio_stream = stream->av_stream();
1410 if (!streams_[i]) 1387 DCHECK(audio_stream);
1411 continue; 1388 if (audio_stream->codec->codec_id == AV_CODEC_ID_OPUS ||
1412 streams_[i]->enable_negative_timestamp_fixups(); 1389 (strcmp(format_context->iformat->name, "ogg") == 0 &&
1390 audio_stream->codec->codec_id == AV_CODEC_ID_VORBIS)) {
1391 for (size_t i = 0; i < streams_.size(); ++i) {
1392 if (!streams_[i])
1393 continue;
1394 streams_[i]->enable_negative_timestamp_fixups();
1413 1395
1414 // Fixup the seeking information to avoid selecting the audio stream 1396 // Fixup the seeking information to avoid selecting the audio stream
1415 // simply because it has a lower starting time. 1397 // simply because it has a lower starting time.
1416 if (streams_[i]->av_stream() == audio_stream && 1398 if (streams_[i]->av_stream() == audio_stream &&
1417 streams_[i]->start_time() < base::TimeDelta()) { 1399 streams_[i]->start_time() < base::TimeDelta()) {
1418 streams_[i]->set_start_time(base::TimeDelta()); 1400 streams_[i]->set_start_time(base::TimeDelta());
1401 }
1419 } 1402 }
1420 } 1403 }
1421 } 1404 }
1422 1405
1423 // If no start time could be determined, default to zero. 1406 // If no start time could be determined, default to zero.
1424 if (start_time_ == kInfiniteDuration) { 1407 if (start_time_ == kInfiniteDuration) {
1425 start_time_ = base::TimeDelta(); 1408 start_time_ = base::TimeDelta();
1426 } 1409 }
1427 1410
1428 // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS 1411 // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS
(...skipping 22 matching lines...)
1451 // initializing. 1434 // initializing.
1452 host_->SetDuration(max_duration); 1435 host_->SetDuration(max_duration);
1453 duration_known_ = (max_duration != kInfiniteDuration); 1436 duration_known_ = (max_duration != kInfiniteDuration);
1454 1437
1455 int64_t filesize_in_bytes = 0; 1438 int64_t filesize_in_bytes = 0;
1456 url_protocol_->GetSize(&filesize_in_bytes); 1439 url_protocol_->GetSize(&filesize_in_bytes);
1457 bitrate_ = CalculateBitrate(format_context, max_duration, filesize_in_bytes); 1440 bitrate_ = CalculateBitrate(format_context, max_duration, filesize_in_bytes);
1458 if (bitrate_ > 0) 1441 if (bitrate_ > 0)
1459 data_source_->SetBitrate(bitrate_); 1442 data_source_->SetBitrate(bitrate_);
1460 1443
1444 LogMetadata(format_context, max_duration);
1445 media_tracks_updated_cb_.Run(std::move(media_tracks));
1446
1447 status_cb.Run(PIPELINE_OK);
1448 }
1449
1450 void FFmpegDemuxer::LogMetadata(AVFormatContext* avctx,
1451 base::TimeDelta max_duration) {
1461 // Use a single MediaLogEvent to batch all parameter updates at once; this 1452 // Use a single MediaLogEvent to batch all parameter updates at once; this
1462 // prevents throttling of events due to the large number of updates here. 1453 // prevents throttling of events due to the large number of updates here.
1463 std::unique_ptr<MediaLogEvent> metadata_event = 1454 std::unique_ptr<MediaLogEvent> metadata_event =
1464 media_log_->CreateEvent(MediaLogEvent::PROPERTY_CHANGE); 1455 media_log_->CreateEvent(MediaLogEvent::PROPERTY_CHANGE);
1465 1456
1466 // Audio logging. 1457 DCHECK_EQ(avctx->nb_streams, streams_.size());
1467 metadata_event->params.SetBoolean("found_audio_stream", !!audio_stream); 1458 auto& params = metadata_event->params;
1468 if (audio_stream) { 1459 int audio_track_count = 0;
1469 const AVCodecContext* audio_codec = audio_stream->codec; 1460 int video_track_count = 0;
1470 metadata_event->params.SetString("audio_codec_name", 1461 for (size_t i = 0; i < streams_.size(); ++i) {
1471 GetCodecName(audio_codec)); 1462 FFmpegDemuxerStream* stream = streams_[i].get();
1472 metadata_event->params.SetInteger("audio_channels_count", 1463 if (!stream)
1473 audio_codec->channels); 1464 continue;
1474 metadata_event->params.SetString( 1465 if (stream->type() == DemuxerStream::AUDIO) {
1475 "audio_sample_format", 1466 ++audio_track_count;
1476 SampleFormatToString(audio_config.sample_format())); 1467 std::string suffix = "";
1477 metadata_event->params.SetInteger("audio_samples_per_second", 1468 if (audio_track_count > 1)
1478 audio_config.samples_per_second()); 1469 suffix = "_track" + base::IntToString(audio_track_count);
1470 const AVCodecContext* audio_codec = avctx->streams[i]->codec;
1471 const AudioDecoderConfig& audio_config = stream->audio_decoder_config();
1472 params.SetString("audio_codec_name" + suffix, GetCodecName(audio_codec));
1473 params.SetInteger("audio_channels_count" + suffix, audio_codec->channels);
1474 params.SetString("audio_sample_format" + suffix,
1475 SampleFormatToString(audio_config.sample_format()));
1476 params.SetInteger("audio_samples_per_second" + suffix,
1477 audio_config.samples_per_second());
1478 } else if (stream->type() == DemuxerStream::VIDEO) {
1479 ++video_track_count;
1480 std::string suffix = "";
1481 if (video_track_count > 1)
1482 suffix = "_track" + base::IntToString(video_track_count);
1483 const AVCodecContext* video_codec = avctx->streams[i]->codec;
1484 const VideoDecoderConfig& video_config = stream->video_decoder_config();
1485 params.SetString("video_codec_name" + suffix, GetCodecName(video_codec));
1486 params.SetInteger("width" + suffix, video_codec->width);
1487 params.SetInteger("height" + suffix, video_codec->height);
1488 params.SetInteger("coded_width" + suffix, video_codec->coded_width);
1489 params.SetInteger("coded_height" + suffix, video_codec->coded_height);
1490 params.SetString("time_base" + suffix,
1491 base::StringPrintf("%d/%d", video_codec->time_base.num,
1492 video_codec->time_base.den));
1493 params.SetString("video_format" + suffix,
1494 VideoPixelFormatToString(video_config.format()));
1495 params.SetBoolean("video_is_encrypted" + suffix,
1496 video_config.is_encrypted());
1497 }
1479 } 1498 }
1480 1499 params.SetBoolean("found_audio_stream", (audio_track_count > 0));
1481 // Video logging 1500 params.SetBoolean("found_video_stream", (video_track_count > 0));
1482 metadata_event->params.SetBoolean("found_video_stream", !!video_stream);
1483 if (video_stream) {
1484 const AVCodecContext* video_codec = video_stream->codec;
1485 metadata_event->params.SetString("video_codec_name",
1486 GetCodecName(video_codec));
1487 metadata_event->params.SetInteger("width", video_codec->width);
1488 metadata_event->params.SetInteger("height", video_codec->height);
1489 metadata_event->params.SetInteger("coded_width", video_codec->coded_width);
1490 metadata_event->params.SetInteger("coded_height",
1491 video_codec->coded_height);
1492 metadata_event->params.SetString(
1493 "time_base", base::StringPrintf("%d/%d", video_codec->time_base.num,
1494 video_codec->time_base.den));
1495 metadata_event->params.SetString(
1496 "video_format", VideoPixelFormatToString(video_config.format()));
1497 metadata_event->params.SetBoolean("video_is_encrypted",
1498 video_config.is_encrypted());
1499 }
1500
1501 SetTimeProperty(metadata_event.get(), "max_duration", max_duration); 1501 SetTimeProperty(metadata_event.get(), "max_duration", max_duration);
1502 SetTimeProperty(metadata_event.get(), "start_time", start_time_); 1502 SetTimeProperty(metadata_event.get(), "start_time", start_time_);
1503 metadata_event->params.SetInteger("bitrate", bitrate_); 1503 metadata_event->params.SetInteger("bitrate", bitrate_);
1504 media_log_->AddEvent(std::move(metadata_event)); 1504 media_log_->AddEvent(std::move(metadata_event));
1505
1506 media_tracks_updated_cb_.Run(std::move(media_tracks));
1507
1508 status_cb.Run(PIPELINE_OK);
1509 } 1505 }
1510 1506
1511 FFmpegDemuxerStream* FFmpegDemuxer::FindPreferredStreamForSeeking( 1507 FFmpegDemuxerStream* FFmpegDemuxer::FindPreferredStreamForSeeking(
1512 base::TimeDelta seek_time) { 1508 base::TimeDelta seek_time) {
1513 // If we have a selected/enabled video stream and its start time is lower 1509 // If we have a selected/enabled video stream and its start time is lower
1514 // than the |seek_time| or unknown, then always prefer it for seeking. 1510 // than the |seek_time| or unknown, then always prefer it for seeking.
1515 FFmpegDemuxerStream* video_stream = nullptr; 1511 FFmpegDemuxerStream* video_stream = nullptr;
1516 for (const auto& stream : streams_) { 1512 for (const auto& stream : streams_) {
1517 if (stream && stream->type() == DemuxerStream::VIDEO && stream->enabled()) { 1513 if (stream && stream->type() == DemuxerStream::VIDEO && stream->enabled()) {
1518 video_stream = stream; 1514 video_stream = stream.get();
1519 if (video_stream->start_time() == kNoTimestamp || 1515 if (video_stream->start_time() == kNoTimestamp ||
1520 video_stream->start_time() <= seek_time) { 1516 video_stream->start_time() <= seek_time) {
1521 return stream; 1517 return video_stream;
1522 } 1518 }
1523 break; 1519 break;
1524 } 1520 }
1525 } 1521 }
1526 1522
1527 // If video stream is not present or |seek_time| is lower than the video start 1523 // If video stream is not present or |seek_time| is lower than the video start
1528 // time, then try to find an enabled stream with the lowest start time. 1524 // time, then try to find an enabled stream with the lowest start time.
1529 FFmpegDemuxerStream* lowest_start_time_stream = nullptr; 1525 FFmpegDemuxerStream* lowest_start_time_stream = nullptr;
1530 for (const auto& stream : streams_) { 1526 for (const auto& stream : streams_) {
1531 if (!stream || !stream->enabled() || stream->start_time() == kNoTimestamp) 1527 if (!stream || !stream->enabled() || stream->start_time() == kNoTimestamp)
1532 continue; 1528 continue;
1533 if (!lowest_start_time_stream || 1529 if (!lowest_start_time_stream ||
1534 stream->start_time() < lowest_start_time_stream->start_time()) { 1530 stream->start_time() < lowest_start_time_stream->start_time()) {
1535 lowest_start_time_stream = stream; 1531 lowest_start_time_stream = stream.get();
1536 } 1532 }
1537 } 1533 }
1538 // If we found a stream with start time lower than |seek_time|, then use it. 1534 // If we found a stream with start time lower than |seek_time|, then use it.
1539 if (lowest_start_time_stream && 1535 if (lowest_start_time_stream &&
1540 lowest_start_time_stream->start_time() <= seek_time) { 1536 lowest_start_time_stream->start_time() <= seek_time) {
1541 return lowest_start_time_stream; 1537 return lowest_start_time_stream;
1542 } 1538 }
1543 1539
1544 // If we couldn't find any streams with the start time lower than |seek_time| 1540 // If we couldn't find any streams with the start time lower than |seek_time|
1545 // then use either video (if one exists) or any audio stream. 1541 // then use either video (if one exists) or any audio stream.
(...skipping 12 matching lines...) Expand all
1558 } 1554 }
1559 1555
1560 if (result < 0) { 1556 if (result < 0) {
1561 // Use VLOG(1) instead of NOTIMPLEMENTED() to prevent the message being 1557 // Use VLOG(1) instead of NOTIMPLEMENTED() to prevent the message being
1562 // captured from stdout and contaminates testing. 1558 // captured from stdout and contaminates testing.
1563 // TODO(scherkus): Implement this properly and signal error (BUG=23447). 1559 // TODO(scherkus): Implement this properly and signal error (BUG=23447).
1564 VLOG(1) << "Not implemented"; 1560 VLOG(1) << "Not implemented";
1565 } 1561 }
1566 1562
1567 // Tell streams to flush buffers due to seeking. 1563 // Tell streams to flush buffers due to seeking.
1568 StreamVector::iterator iter; 1564 for (const auto& stream : streams_) {
1569 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { 1565 if (stream)
1570 if (*iter) 1566 stream->FlushBuffers();
1571 (*iter)->FlushBuffers();
1572 } 1567 }
1573 1568
1574 // Resume reading until capacity. 1569 // Resume reading until capacity.
1575 ReadFrameIfNeeded(); 1570 ReadFrameIfNeeded();
1576 1571
1577 // Notify we're finished seeking. 1572 // Notify we're finished seeking.
1578 base::ResetAndReturn(&pending_seek_cb_).Run(PIPELINE_OK); 1573 base::ResetAndReturn(&pending_seek_cb_).Run(PIPELINE_OK);
1579 } 1574 }
1580 1575
1581 void FFmpegDemuxer::OnEnabledAudioTracksChanged( 1576 void FFmpegDemuxer::OnEnabledAudioTracksChanged(
1582 const std::vector<MediaTrack::Id>& track_ids, 1577 const std::vector<MediaTrack::Id>& track_ids,
1583 base::TimeDelta currTime) { 1578 base::TimeDelta currTime) {
1584 DCHECK(task_runner_->BelongsToCurrentThread()); 1579 DCHECK(task_runner_->BelongsToCurrentThread());
1585 bool enabled = false; 1580
1586 DemuxerStream* audio_stream = GetStream(DemuxerStream::AUDIO); 1581 std::set<DemuxerStream*> enabled_streams;
1587 CHECK(audio_stream); 1582 for (const auto& id : track_ids) {
1588 if (track_ids.size() > 0) { 1583 DemuxerStream* stream = track_id_to_demux_stream_map_[id];
1589 DCHECK(track_id_to_demux_stream_map_[track_ids[0]] == audio_stream); 1584 DCHECK(stream);
1590 enabled = true; 1585 DCHECK_EQ(DemuxerStream::AUDIO, stream->type());
1586 enabled_streams.insert(stream);
1591 } 1587 }
1592 DVLOG(1) << __func__ << ": " << (enabled ? "enabling" : "disabling") 1588
1593 << " audio stream"; 1589 // First disable all streams that need to be disabled and then enable streams
1594 audio_stream->set_enabled(enabled, currTime); 1590 // that are enabled.
1591 for (const auto& stream : streams_) {
1592 if (stream->type() == DemuxerStream::AUDIO &&
1593 enabled_streams.find(stream.get()) == enabled_streams.end()) {
1594 DVLOG(1) << __func__ << ": disabling stream " << stream.get();
1595 stream->set_enabled(false, currTime);
1596 }
1597 }
1598 for (const auto& stream : enabled_streams) {
1599 DVLOG(1) << __func__ << ": enabling stream " << stream;
1600 stream->set_enabled(true, currTime);
1601 }
1595 } 1602 }
1596 1603
1597 void FFmpegDemuxer::OnSelectedVideoTrackChanged( 1604 void FFmpegDemuxer::OnSelectedVideoTrackChanged(
1598 const std::vector<MediaTrack::Id>& track_ids, 1605 const std::vector<MediaTrack::Id>& track_ids,
1599 base::TimeDelta currTime) { 1606 base::TimeDelta currTime) {
1600 DCHECK(task_runner_->BelongsToCurrentThread()); 1607 DCHECK(task_runner_->BelongsToCurrentThread());
1601 bool enabled = false; 1608 DCHECK_LE(track_ids.size(), 1u);
1602 DemuxerStream* video_stream = GetStream(DemuxerStream::VIDEO); 1609
1603 CHECK(video_stream); 1610 DemuxerStream* selected_stream = nullptr;
1604 if (track_ids.size() > 0) { 1611 if (!track_ids.empty()) {
1605 DCHECK(track_id_to_demux_stream_map_[track_ids[0]] == video_stream); 1612 selected_stream = track_id_to_demux_stream_map_[track_ids[0]];
1606 enabled = true; 1613 DCHECK(selected_stream);
1614 DCHECK_EQ(DemuxerStream::VIDEO, selected_stream->type());
1607 } 1615 }
1608 DVLOG(1) << __func__ << ": " << (enabled ? "enabling" : "disabling") 1616
1609 << " video stream"; 1617 // First disable all streams that need to be disabled and then enable the
1610 video_stream->set_enabled(enabled, currTime); 1618 // stream that needs to be enabled (if any).
1619 for (const auto& stream : streams_) {
1620 if (stream->type() == DemuxerStream::VIDEO &&
1621 stream.get() != selected_stream) {
1622 DVLOG(1) << __func__ << ": disabling stream " << stream.get();
1623 stream->set_enabled(false, currTime);
1624 }
1625 }
1626 if (selected_stream) {
1627 DVLOG(1) << __func__ << ": enabling stream " << selected_stream;
1628 selected_stream->set_enabled(true, currTime);
1629 }
1611 } 1630 }
1612 1631
1613 void FFmpegDemuxer::ReadFrameIfNeeded() { 1632 void FFmpegDemuxer::ReadFrameIfNeeded() {
1614 DCHECK(task_runner_->BelongsToCurrentThread()); 1633 DCHECK(task_runner_->BelongsToCurrentThread());
1615 1634
1616 // Make sure we have work to do before reading. 1635 // Make sure we have work to do before reading.
1617 if (!blocking_thread_.IsRunning() || !StreamsHaveAvailableCapacity() || 1636 if (!blocking_thread_.IsRunning() || !StreamsHaveAvailableCapacity() ||
1618 pending_read_ || !pending_seek_cb_.is_null()) { 1637 pending_read_ || !pending_seek_cb_.is_null()) {
1619 return; 1638 return;
1620 } 1639 }
(...skipping 26 matching lines...)
1647 // - either underlying ffmpeg returned an error 1666 // - either underlying ffmpeg returned an error
1648 // - or FFMpegDemuxer reached the maximum allowed memory usage. 1667 // - or FFMpegDemuxer reached the maximum allowed memory usage.
1649 if (result < 0 || IsMaxMemoryUsageReached()) { 1668 if (result < 0 || IsMaxMemoryUsageReached()) {
1650 DVLOG(1) << __func__ << " result=" << result 1669 DVLOG(1) << __func__ << " result=" << result
1651 << " IsMaxMemoryUsageReached=" << IsMaxMemoryUsageReached(); 1670 << " IsMaxMemoryUsageReached=" << IsMaxMemoryUsageReached();
1652 // Update the duration based on the highest elapsed time across all streams 1671 // Update the duration based on the highest elapsed time across all streams
1653 // if it was previously unknown. 1672 // if it was previously unknown.
1654 if (!duration_known_) { 1673 if (!duration_known_) {
1655 base::TimeDelta max_duration; 1674 base::TimeDelta max_duration;
1656 1675
1657 for (StreamVector::iterator iter = streams_.begin(); 1676 for (const auto& stream : streams_) {
1658 iter != streams_.end(); 1677 if (!stream)
1659 ++iter) {
1660 if (!*iter)
1661 continue; 1678 continue;
1662 1679
1663 base::TimeDelta duration = (*iter)->GetElapsedTime(); 1680 base::TimeDelta duration = stream->GetElapsedTime();
1664 if (duration != kNoTimestamp && duration > max_duration) 1681 if (duration != kNoTimestamp && duration > max_duration)
1665 max_duration = duration; 1682 max_duration = duration;
1666 } 1683 }
1667 1684
1668 if (max_duration > base::TimeDelta()) { 1685 if (max_duration > base::TimeDelta()) {
1669 host_->SetDuration(max_duration); 1686 host_->SetDuration(max_duration);
1670 duration_known_ = true; 1687 duration_known_ = true;
1671 } 1688 }
1672 } 1689 }
1673 // If we have reached the end of stream, tell the downstream filters about 1690 // If we have reached the end of stream, tell the downstream filters about
(...skipping 14 matching lines...)
1688 // when av_read_frame() returns success code. See bug comment for ideas: 1705 // when av_read_frame() returns success code. See bug comment for ideas:
1689 // 1706 //
1690 // https://code.google.com/p/chromium/issues/detail?id=169133#c10 1707 // https://code.google.com/p/chromium/issues/detail?id=169133#c10
1691 if (!packet->data) { 1708 if (!packet->data) {
1692 ScopedAVPacket new_packet(new AVPacket()); 1709 ScopedAVPacket new_packet(new AVPacket());
1693 av_new_packet(new_packet.get(), 0); 1710 av_new_packet(new_packet.get(), 0);
1694 av_packet_copy_props(new_packet.get(), packet.get()); 1711 av_packet_copy_props(new_packet.get(), packet.get());
1695 packet.swap(new_packet); 1712 packet.swap(new_packet);
1696 } 1713 }
1697 1714
1698 FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index]; 1715 FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index].get();
1699 if (demuxer_stream->enabled()) 1716 if (demuxer_stream->enabled())
1700 demuxer_stream->EnqueuePacket(std::move(packet)); 1717 demuxer_stream->EnqueuePacket(std::move(packet));
1701 } 1718 }
1702 1719
1703 // Keep reading until we've reached capacity. 1720 // Keep reading until we've reached capacity.
1704 ReadFrameIfNeeded(); 1721 ReadFrameIfNeeded();
1705 } 1722 }
1706 1723
1707 bool FFmpegDemuxer::StreamsHaveAvailableCapacity() { 1724 bool FFmpegDemuxer::StreamsHaveAvailableCapacity() {
1708 DCHECK(task_runner_->BelongsToCurrentThread()); 1725 DCHECK(task_runner_->BelongsToCurrentThread());
1709 StreamVector::iterator iter; 1726 for (const auto& stream : streams_) {
1710 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { 1727 if (stream && stream->HasAvailableCapacity())
1711 if (*iter && (*iter)->HasAvailableCapacity()) {
1712 return true; 1728 return true;
1713 }
1714 } 1729 }
1715 return false; 1730 return false;
1716 } 1731 }
1717 1732
1718 bool FFmpegDemuxer::IsMaxMemoryUsageReached() const { 1733 bool FFmpegDemuxer::IsMaxMemoryUsageReached() const {
1719 DCHECK(task_runner_->BelongsToCurrentThread()); 1734 DCHECK(task_runner_->BelongsToCurrentThread());
1720 1735
1721 // Max allowed memory usage, all streams combined. 1736 // Max allowed memory usage, all streams combined.
1722 const size_t kDemuxerMemoryLimit = 150 * 1024 * 1024; 1737 const size_t kDemuxerMemoryLimit = 150 * 1024 * 1024;
1723 1738
1724 size_t memory_left = kDemuxerMemoryLimit; 1739 size_t memory_left = kDemuxerMemoryLimit;
1725 for (StreamVector::const_iterator iter = streams_.begin(); 1740 for (const auto& stream : streams_) {
1726 iter != streams_.end(); ++iter) { 1741 if (!stream)
1727 if (!(*iter))
1728 continue; 1742 continue;
1729 1743
1730 size_t stream_memory_usage = (*iter)->MemoryUsage(); 1744 size_t stream_memory_usage = stream->MemoryUsage();
1731 if (stream_memory_usage > memory_left) 1745 if (stream_memory_usage > memory_left)
1732 return true; 1746 return true;
1733 memory_left -= stream_memory_usage; 1747 memory_left -= stream_memory_usage;
1734 } 1748 }
1735 return false; 1749 return false;
1736 } 1750 }
1737 1751
1738 void FFmpegDemuxer::StreamHasEnded() { 1752 void FFmpegDemuxer::StreamHasEnded() {
1739 DCHECK(task_runner_->BelongsToCurrentThread()); 1753 DCHECK(task_runner_->BelongsToCurrentThread());
1740 StreamVector::iterator iter; 1754 for (const auto& stream : streams_) {
1741 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { 1755 if (stream)
1742 if (!*iter) 1756 stream->SetEndOfStream();
1743 continue;
1744 (*iter)->SetEndOfStream();
1745 } 1757 }
1746 } 1758 }
1747 1759
1748 void FFmpegDemuxer::OnDataSourceError() { 1760 void FFmpegDemuxer::OnDataSourceError() {
1749 MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": data source error"; 1761 MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": data source error";
1750 host_->OnDemuxerError(PIPELINE_ERROR_READ); 1762 host_->OnDemuxerError(PIPELINE_ERROR_READ);
1751 } 1763 }
1752 1764
1753 void FFmpegDemuxer::SetLiveness(DemuxerStream::Liveness liveness) { 1765 void FFmpegDemuxer::SetLiveness(DemuxerStream::Liveness liveness) {
1754 DCHECK(task_runner_->BelongsToCurrentThread()); 1766 DCHECK(task_runner_->BelongsToCurrentThread());
1755 for (auto* stream : streams_) { 1767 for (const auto& stream : streams_) {
1756 if (stream) 1768 if (stream)
1757 stream->SetLiveness(liveness); 1769 stream->SetLiveness(liveness);
1758 } 1770 }
1759 } 1771 }
1760 1772
1761 } // namespace media 1773 } // namespace media
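
A side note on the new LogMetadata() above: it batches one property set per track, where the first track of a given type keeps the bare key and later tracks get a "_trackN" suffix. The snippet below is a standalone illustration of just that naming rule; the codec values and helper names are made up, and base::IntToString is replaced by std::to_string.

#include <iostream>
#include <string>
#include <vector>

// Mirrors the suffix rule in LogMetadata(): track 1 -> "", track N > 1 -> "_trackN".
std::string TrackSuffix(int track_count) {
  return track_count > 1 ? "_track" + std::to_string(track_count) : "";
}

int main() {
  const std::vector<std::string> audio_codecs = {"opus", "aac"};  // illustrative values
  int audio_track_count = 0;
  for (const auto& codec : audio_codecs) {
    ++audio_track_count;
    std::cout << "audio_codec_name" << TrackSuffix(audio_track_count)
              << " = " << codec << "\n";
  }
  // Output:
  //   audio_codec_name = opus
  //   audio_codec_name_track2 = aac
  return 0;
}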