Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/ffmpeg_demuxer.h" | 5 #include "media/filters/ffmpeg_demuxer.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <memory> | 8 #include <memory> |
| 9 #include <utility> | 9 #include <utility> |
| 10 | 10 |
| (...skipping 877 matching lines...) | |
| 888 // in the process of being fulfilled by the DataSource. | 888 // in the process of being fulfilled by the DataSource. |
| 889 data_source_->Stop(); | 889 data_source_->Stop(); |
| 890 url_protocol_->Abort(); | 890 url_protocol_->Abort(); |
| 891 | 891 |
| 892 // This will block until all tasks complete. Note that after this returns it's | 892 // This will block until all tasks complete. Note that after this returns it's |
| 893 // possible for reply tasks (e.g., OnReadFrameDone()) to be queued on this | 893 // possible for reply tasks (e.g., OnReadFrameDone()) to be queued on this |
| 894 // thread. Each of the reply task methods must check whether we've stopped the | 894 // thread. Each of the reply task methods must check whether we've stopped the |
| 895 // thread and drop their results on the floor. | 895 // thread and drop their results on the floor. |
| 896 blocking_thread_.Stop(); | 896 blocking_thread_.Stop(); |
| 897 | 897 |
| 898 StreamVector::iterator iter; | 898 for (const auto& stream : streams_) { |
| 899 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { | 899 if (stream) |
| 900 if (*iter) | 900 stream->Stop(); |
| 901 (*iter)->Stop(); | |
| 902 } | 901 } |
| 903 | 902 |
| 904 data_source_ = NULL; | 903 data_source_ = NULL; |
| 905 | 904 |
| 906 // Invalidate WeakPtrs on |task_runner_|, destruction may happen on another | 905 // Invalidate WeakPtrs on |task_runner_|, destruction may happen on another |
| 907 // thread. | 906 // thread. |
| 908 weak_factory_.InvalidateWeakPtrs(); | 907 weak_factory_.InvalidateWeakPtrs(); |
| 909 } | 908 } |
| 910 | 909 |
| 911 void FFmpegDemuxer::StartWaitingForSeek(base::TimeDelta seek_time) {} | 910 void FFmpegDemuxer::StartWaitingForSeek(base::TimeDelta seek_time) {} |
| (...skipping 61 matching lines...) | |
| 973 return timeline_offset_; | 972 return timeline_offset_; |
| 974 } | 973 } |
| 975 | 974 |
| 976 DemuxerStream* FFmpegDemuxer::GetStream(DemuxerStream::Type type) { | 975 DemuxerStream* FFmpegDemuxer::GetStream(DemuxerStream::Type type) { |
| 977 DCHECK(task_runner_->BelongsToCurrentThread()); | 976 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 978 return GetFFmpegStream(type); | 977 return GetFFmpegStream(type); |
| 979 } | 978 } |
| 980 | 979 |
| 981 FFmpegDemuxerStream* FFmpegDemuxer::GetFFmpegStream( | 980 FFmpegDemuxerStream* FFmpegDemuxer::GetFFmpegStream( |
| 982 DemuxerStream::Type type) const { | 981 DemuxerStream::Type type) const { |
| 983 StreamVector::const_iterator iter; | 982 for (const auto& it : streams_) { |
|
DaleCurtis
2016/08/29 18:15:20
Not really an iterator any more.
servolk
2016/08/29 18:38:20
What would be a better name? 'stream'? 's'?
DaleCurtis
2016/08/29 18:49:29
Yeah, you renamed all the rest to streams :)
servolk
2016/08/29 20:25:28
Done.
| |
| 984 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { | 983 if (it && it->type() == type && it->enabled()) { |
| 985 if (*iter && (*iter)->type() == type) { | 984 return it.get(); |
| 986 return *iter; | |
| 987 } | 985 } |
| 988 } | 986 } |
| 989 return NULL; | 987 return NULL; |
| 990 } | 988 } |
| 991 | 989 |
| 992 base::TimeDelta FFmpegDemuxer::GetStartTime() const { | 990 base::TimeDelta FFmpegDemuxer::GetStartTime() const { |
| 993 return std::max(start_time_, base::TimeDelta()); | 991 return std::max(start_time_, base::TimeDelta()); |
| 994 } | 992 } |
| 995 | 993 |
| 996 void FFmpegDemuxer::AddTextStreams() { | 994 void FFmpegDemuxer::AddTextStreams() { |
| 997 DCHECK(task_runner_->BelongsToCurrentThread()); | 995 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 998 | 996 |
| 999 for (StreamVector::size_type idx = 0; idx < streams_.size(); ++idx) { | 997 for (const auto& stream : streams_) { |
| 1000 FFmpegDemuxerStream* stream = streams_[idx]; | 998 if (!stream || stream->type() != DemuxerStream::TEXT) |
| 1001 if (stream == NULL || stream->type() != DemuxerStream::TEXT) | |
| 1002 continue; | 999 continue; |
| 1003 | 1000 |
| 1004 TextKind kind = stream->GetTextKind(); | 1001 TextKind kind = stream->GetTextKind(); |
| 1005 std::string title = stream->GetMetadata("title"); | 1002 std::string title = stream->GetMetadata("title"); |
| 1006 std::string language = stream->GetMetadata("language"); | 1003 std::string language = stream->GetMetadata("language"); |
| 1007 | 1004 |
| 1008 // TODO: Implement "id" metadata in FFMPEG. | 1005 // TODO: Implement "id" metadata in FFMPEG. |
| 1009 // See: http://crbug.com/323183 | 1006 // See: http://crbug.com/323183 |
| 1010 host_->AddTextStream(stream, TextTrackConfig(kind, title, language, | 1007 host_->AddTextStream(stream.get(), |
| 1011 std::string())); | 1008 TextTrackConfig(kind, title, language, std::string())); |
| 1012 } | 1009 } |
| 1013 } | 1010 } |
| 1014 | 1011 |
| 1015 int64_t FFmpegDemuxer::GetMemoryUsage() const { | 1012 int64_t FFmpegDemuxer::GetMemoryUsage() const { |
| 1016 int64_t allocation_size = 0; | 1013 int64_t allocation_size = 0; |
| 1017 for (auto* stream : streams_) { | 1014 for (const auto& stream : streams_) { |
| 1018 if (stream) | 1015 if (stream) |
| 1019 allocation_size += stream->MemoryUsage(); | 1016 allocation_size += stream->MemoryUsage(); |
| 1020 } | 1017 } |
| 1021 return allocation_size; | 1018 return allocation_size; |
| 1022 } | 1019 } |
| 1023 | 1020 |
| 1024 void FFmpegDemuxer::OnEncryptedMediaInitData( | 1021 void FFmpegDemuxer::OnEncryptedMediaInitData( |
| 1025 EmeInitDataType init_data_type, | 1022 EmeInitDataType init_data_type, |
| 1026 const std::string& encryption_key_id) { | 1023 const std::string& encryption_key_id) { |
| 1027 std::vector<uint8_t> key_id_local(encryption_key_id.begin(), | 1024 std::vector<uint8_t> key_id_local(encryption_key_id.begin(), |
| (...skipping 129 matching lines...) | |
| 1157 const base::TimeDelta packet_pts = | 1154 const base::TimeDelta packet_pts = |
| 1158 ConvertFromTimeBase(stream->time_base, packet_buffer->pkt.pts); | 1155 ConvertFromTimeBase(stream->time_base, packet_buffer->pkt.pts); |
| 1159 if (packet_pts < start_time_estimates[stream->index]) | 1156 if (packet_pts < start_time_estimates[stream->index]) |
| 1160 start_time_estimates[stream->index] = packet_pts; | 1157 start_time_estimates[stream->index] = packet_pts; |
| 1161 } | 1158 } |
| 1162 packet_buffer = packet_buffer->next; | 1159 packet_buffer = packet_buffer->next; |
| 1163 } | 1160 } |
| 1164 } | 1161 } |
| 1165 | 1162 |
| 1166 std::unique_ptr<MediaTracks> media_tracks(new MediaTracks()); | 1163 std::unique_ptr<MediaTracks> media_tracks(new MediaTracks()); |
| 1167 AVStream* audio_stream = NULL; | |
| 1168 AudioDecoderConfig audio_config; | |
| 1169 AVStream* video_stream = NULL; | |
| 1170 VideoDecoderConfig video_config; | |
| 1171 | 1164 |
| 1172 DCHECK(track_id_to_demux_stream_map_.empty()); | 1165 DCHECK(track_id_to_demux_stream_map_.empty()); |
| 1173 | 1166 |
| 1174 // If available, |start_time_| will be set to the lowest stream start time. | 1167 // If available, |start_time_| will be set to the lowest stream start time. |
| 1175 start_time_ = kInfiniteDuration; | 1168 start_time_ = kInfiniteDuration; |
| 1176 | 1169 |
| 1177 base::TimeDelta max_duration; | 1170 base::TimeDelta max_duration; |
| 1178 int detected_audio_track_count = 0; | 1171 int detected_audio_track_count = 0; |
| 1179 int detected_video_track_count = 0; | 1172 int detected_video_track_count = 0; |
| 1180 int detected_text_track_count = 0; | 1173 int detected_text_track_count = 0; |
| 1181 for (size_t i = 0; i < format_context->nb_streams; ++i) { | 1174 for (size_t i = 0; i < format_context->nb_streams; ++i) { |
| 1182 AVStream* stream = format_context->streams[i]; | 1175 AVStream* stream = format_context->streams[i]; |
| 1183 const AVCodecContext* codec_context = stream->codec; | 1176 const AVCodecContext* codec_context = stream->codec; |
| 1184 const AVMediaType codec_type = codec_context->codec_type; | 1177 const AVMediaType codec_type = codec_context->codec_type; |
| 1185 | 1178 |
| 1186 if (codec_type == AVMEDIA_TYPE_AUDIO) { | 1179 if (codec_type == AVMEDIA_TYPE_AUDIO) { |
| 1187 // Log the codec detected, whether it is supported or not, and whether or | 1180 // Log the codec detected, whether it is supported or not, and whether or |
| 1188 // not we have already detected a supported codec in another stream. | 1181 // not we have already detected a supported codec in another stream. |
| 1189 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodecHash", | 1182 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodecHash", |
| 1190 HashCodecName(GetCodecName(codec_context))); | 1183 HashCodecName(GetCodecName(codec_context))); |
| 1191 detected_audio_track_count++; | 1184 detected_audio_track_count++; |
| 1192 | |
| 1193 if (audio_stream) { | |
| 1194 MEDIA_LOG(INFO, media_log_) << GetDisplayName() | |
| 1195 << ": skipping extra audio track"; | |
| 1196 continue; | |
| 1197 } | |
| 1198 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { | 1185 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { |
| 1199 // Log the codec detected, whether it is supported or not, and whether or | 1186 // Log the codec detected, whether it is supported or not, and whether or |
| 1200 // not we have already detected a supported codec in another stream. | 1187 // not we have already detected a supported codec in another stream. |
| 1201 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodecHash", | 1188 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodecHash", |
| 1202 HashCodecName(GetCodecName(codec_context))); | 1189 HashCodecName(GetCodecName(codec_context))); |
| 1203 detected_video_track_count++; | 1190 detected_video_track_count++; |
| 1204 | 1191 |
| 1205 if (video_stream) { | |
| 1206 MEDIA_LOG(INFO, media_log_) << GetDisplayName() | |
| 1207 << ": skipping extra video track"; | |
| 1208 continue; | |
| 1209 } | |
| 1210 | |
| 1211 #if BUILDFLAG(ENABLE_HEVC_DEMUXING) | 1192 #if BUILDFLAG(ENABLE_HEVC_DEMUXING) |
| 1212 if (stream->codec->codec_id == AV_CODEC_ID_HEVC) { | 1193 if (stream->codec->codec_id == AV_CODEC_ID_HEVC) { |
| 1213 // If ffmpeg is built without HEVC parser/decoder support, it will be | 1194 // If ffmpeg is built without HEVC parser/decoder support, it will be |
| 1214 // able to demux HEVC based solely on container-provided information, | 1195 // able to demux HEVC based solely on container-provided information, |
| 1215 // but unable to get some of the parameters without parsing the stream | 1196 // but unable to get some of the parameters without parsing the stream |
| 1216 // (e.g. coded size needs to be read from SPS, pixel format is typically | 1197 // (e.g. coded size needs to be read from SPS, pixel format is typically |
| 1217 // deduced from decoder config in hvcC box). These are not really needed | 1198 // deduced from decoder config in hvcC box). These are not really needed |
| 1218 // when using external decoder (e.g. hardware decoder), so override them | 1199 // when using external decoder (e.g. hardware decoder), so override them |
| 1219 // here, to make sure this translates into a valid VideoDecoderConfig. | 1200 // here, to make sure this translates into a valid VideoDecoderConfig. |
| 1220 if (stream->codec->coded_width == 0 && | 1201 if (stream->codec->coded_width == 0 && |
| (...skipping 16 matching lines...) | |
| 1237 } else { | 1218 } else { |
| 1238 continue; | 1219 continue; |
| 1239 } | 1220 } |
| 1240 | 1221 |
| 1241 // Attempt to create a FFmpegDemuxerStream from the AVStream. This will | 1222 // Attempt to create a FFmpegDemuxerStream from the AVStream. This will |
| 1242 // return nullptr if the AVStream is invalid. Validity checks will verify | 1223 // return nullptr if the AVStream is invalid. Validity checks will verify |
| 1243 // things like: codec, channel layout, sample/pixel format, etc... | 1224 // things like: codec, channel layout, sample/pixel format, etc... |
| 1244 std::unique_ptr<FFmpegDemuxerStream> demuxer_stream = | 1225 std::unique_ptr<FFmpegDemuxerStream> demuxer_stream = |
| 1245 FFmpegDemuxerStream::Create(this, stream, media_log_); | 1226 FFmpegDemuxerStream::Create(this, stream, media_log_); |
| 1246 if (demuxer_stream.get()) { | 1227 if (demuxer_stream.get()) { |
| 1247 streams_[i] = demuxer_stream.release(); | 1228 streams_[i] = std::move(demuxer_stream); |
| 1248 } else { | 1229 } else { |
| 1249 if (codec_type == AVMEDIA_TYPE_AUDIO) { | 1230 if (codec_type == AVMEDIA_TYPE_AUDIO) { |
| 1250 MEDIA_LOG(INFO, media_log_) | 1231 MEDIA_LOG(INFO, media_log_) |
| 1251 << GetDisplayName() | 1232 << GetDisplayName() |
| 1252 << ": skipping invalid or unsupported audio track"; | 1233 << ": skipping invalid or unsupported audio track"; |
| 1253 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { | 1234 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { |
| 1254 MEDIA_LOG(INFO, media_log_) | 1235 MEDIA_LOG(INFO, media_log_) |
| 1255 << GetDisplayName() | 1236 << GetDisplayName() |
| 1256 << ": skipping invalid or unsupported video track"; | 1237 << ": skipping invalid or unsupported video track"; |
| 1257 } | 1238 } |
| (...skipping 13 matching lines...) | |
| 1271 // Need to fix that and use it as track id. crbug.com/323183 | 1252 // Need to fix that and use it as track id. crbug.com/323183 |
| 1272 track_id = | 1253 track_id = |
| 1273 static_cast<StreamParser::TrackId>(media_tracks->tracks().size() + 1); | 1254 static_cast<StreamParser::TrackId>(media_tracks->tracks().size() + 1); |
| 1274 track_label = streams_[i]->GetMetadata("title"); | 1255 track_label = streams_[i]->GetMetadata("title"); |
| 1275 } | 1256 } |
| 1276 | 1257 |
| 1277 // Note when we find our audio/video stream (we only want one of each) and | 1258 // Note when we find our audio/video stream (we only want one of each) and |
| 1278 // record src= playback UMA stats for the stream's decoder config. | 1259 // record src= playback UMA stats for the stream's decoder config. |
| 1279 MediaTrack* media_track = nullptr; | 1260 MediaTrack* media_track = nullptr; |
| 1280 if (codec_type == AVMEDIA_TYPE_AUDIO) { | 1261 if (codec_type == AVMEDIA_TYPE_AUDIO) { |
| 1281 CHECK(!audio_stream); | 1262 AudioDecoderConfig audio_config = streams_[i]->audio_decoder_config(); |
| 1282 audio_stream = stream; | |
| 1283 audio_config = streams_[i]->audio_decoder_config(); | |
| 1284 RecordAudioCodecStats(audio_config); | 1263 RecordAudioCodecStats(audio_config); |
| 1285 | 1264 |
| 1286 media_track = media_tracks->AddAudioTrack(audio_config, track_id, "main", | 1265 media_track = media_tracks->AddAudioTrack(audio_config, track_id, "main", |
| 1287 track_label, track_language); | 1266 track_label, track_language); |
| 1288 media_track->set_id(base::UintToString(track_id)); | 1267 media_track->set_id(base::UintToString(track_id)); |
| 1289 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) == | 1268 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) == |
| 1290 track_id_to_demux_stream_map_.end()); | 1269 track_id_to_demux_stream_map_.end()); |
| 1291 track_id_to_demux_stream_map_[media_track->id()] = streams_[i]; | 1270 track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get(); |
| 1292 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { | 1271 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { |
| 1293 CHECK(!video_stream); | 1272 VideoDecoderConfig video_config = streams_[i]->video_decoder_config(); |
| 1294 video_stream = stream; | |
| 1295 video_config = streams_[i]->video_decoder_config(); | |
| 1296 | 1273 |
| 1297 RecordVideoCodecStats(video_config, stream->codec->color_range, | 1274 RecordVideoCodecStats(video_config, stream->codec->color_range, |
| 1298 media_log_.get()); | 1275 media_log_.get()); |
| 1299 | 1276 |
| 1300 media_track = media_tracks->AddVideoTrack(video_config, track_id, "main", | 1277 media_track = media_tracks->AddVideoTrack(video_config, track_id, "main", |
| 1301 track_label, track_language); | 1278 track_label, track_language); |
| 1302 media_track->set_id(base::UintToString(track_id)); | 1279 media_track->set_id(base::UintToString(track_id)); |
| 1303 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) == | 1280 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) == |
| 1304 track_id_to_demux_stream_map_.end()); | 1281 track_id_to_demux_stream_map_.end()); |
| 1305 track_id_to_demux_stream_map_[media_track->id()] = streams_[i]; | 1282 track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get(); |
| 1306 } | 1283 } |
| 1307 | 1284 |
| 1308 max_duration = std::max(max_duration, streams_[i]->duration()); | 1285 max_duration = std::max(max_duration, streams_[i]->duration()); |
| 1309 | 1286 |
| 1310 const base::TimeDelta start_time = | 1287 const base::TimeDelta start_time = |
| 1311 ExtractStartTime(stream, start_time_estimates[i]); | 1288 ExtractStartTime(stream, start_time_estimates[i]); |
| 1312 const bool has_start_time = start_time != kNoTimestamp; | 1289 const bool has_start_time = start_time != kNoTimestamp; |
| 1313 | 1290 |
| 1314 if (!has_start_time) | 1291 if (!has_start_time) |
| 1315 continue; | 1292 continue; |
| 1316 | 1293 |
| 1317 streams_[i]->set_start_time(start_time); | 1294 streams_[i]->set_start_time(start_time); |
| 1318 if (start_time < start_time_) { | 1295 if (start_time < start_time_) { |
| 1319 start_time_ = start_time; | 1296 start_time_ = start_time; |
| 1320 } | 1297 } |
| 1321 } | 1298 } |
| 1322 | 1299 |
| 1323 RecordDetectedTrackTypeStats(detected_audio_track_count, | 1300 RecordDetectedTrackTypeStats(detected_audio_track_count, |
| 1324 detected_video_track_count, | 1301 detected_video_track_count, |
| 1325 detected_text_track_count); | 1302 detected_text_track_count); |
| 1326 | 1303 |
| 1327 if (!audio_stream && !video_stream) { | 1304 if (media_tracks->tracks().size() == 0) { |
|
DaleCurtis
2016/08/29 18:15:20
.empty?
servolk
2016/08/29 18:38:20
Done.
| |
| 1328 MEDIA_LOG(ERROR, media_log_) << GetDisplayName() | 1305 MEDIA_LOG(ERROR, media_log_) << GetDisplayName() |
| 1329 << ": no supported streams"; | 1306 << ": no supported streams"; |
| 1330 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); | 1307 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); |
| 1331 return; | 1308 return; |
| 1332 } | 1309 } |
| 1333 | 1310 |
| 1334 if (text_enabled_) | 1311 if (text_enabled_) |
| 1335 AddTextStreams(); | 1312 AddTextStreams(); |
| 1336 | 1313 |
| 1337 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { | 1314 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { |
| (...skipping 17 matching lines...) | |
| 1355 // not be exposed to negative timestamps. Which means we need to rebase these | 1332 // not be exposed to negative timestamps. Which means we need to rebase these |
| 1356 // negative timestamps and mark them for discard post decoding. | 1333 // negative timestamps and mark them for discard post decoding. |
| 1357 // | 1334 // |
| 1358 // Post-decode frame dropping for packets with negative timestamps is outlined | 1335 // Post-decode frame dropping for packets with negative timestamps is outlined |
| 1359 // in section A.2 in the Ogg Vorbis spec: | 1336 // in section A.2 in the Ogg Vorbis spec: |
| 1360 // http://xiph.org/vorbis/doc/Vorbis_I_spec.html | 1337 // http://xiph.org/vorbis/doc/Vorbis_I_spec.html |
| 1361 // | 1338 // |
| 1362 // FFmpeg's use of negative timestamps for opus pre-skip is nonstandard, but | 1339 // FFmpeg's use of negative timestamps for opus pre-skip is nonstandard, but |
| 1363 // for more information on pre-skip see section 4.2 of the Ogg Opus spec: | 1340 // for more information on pre-skip see section 4.2 of the Ogg Opus spec: |
| 1364 // https://tools.ietf.org/html/draft-ietf-codec-oggopus-08#section-4.2 | 1341 // https://tools.ietf.org/html/draft-ietf-codec-oggopus-08#section-4.2 |
| 1365 if (audio_stream && (audio_stream->codec->codec_id == AV_CODEC_ID_OPUS || | 1342 for (const auto& stream : streams_) { |
|
DaleCurtis
2016/08/29 18:15:20
The behavior here is different. Previously it was
servolk
2016/08/29 18:38:21
I think it might matter. If we do have a stream t
| |
| 1366 (strcmp(format_context->iformat->name, "ogg") == 0 && | 1343 if (!stream || stream->type() != DemuxerStream::AUDIO) |
| 1367 audio_stream->codec->codec_id == AV_CODEC_ID_VORBIS))) { | 1344 continue; |
| 1368 for (size_t i = 0; i < streams_.size(); ++i) { | 1345 const AVStream* audio_stream = stream->av_stream(); |
| 1369 if (!streams_[i]) | 1346 DCHECK(audio_stream); |
| 1370 continue; | 1347 if (audio_stream->codec->codec_id == AV_CODEC_ID_OPUS || |
| 1371 streams_[i]->enable_negative_timestamp_fixups(); | 1348 (strcmp(format_context->iformat->name, "ogg") == 0 && |
|
DaleCurtis
2016/08/29 18:15:21
This can be moved outside the loop.
servolk
2016/08/29 18:38:21
Are you talking about strcmp? But it's bound to th
DaleCurtis
2016/08/29 18:49:29
Ah, sorry I forgot this isn't explicitly for ogg a
servolk
2016/08/29 20:25:28
Acknowledged.
| |
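
The exchange above is about whether the `strcmp()` container check belongs inside the per-stream loop. As a minimal sketch of the hoisting idea only, using stand-in types (`FakeFormatContext`, `FakeStream`, placeholder codec ids) rather than the real FFmpeg structs, and not reproducing the patch's full fixup logic:

```cpp
// Sketch with stand-in types, not the real FFmpeg/Chromium classes. The point:
// the container check ("is this ogg?") depends only on the format context, so
// it can be computed once before the per-stream loop; the codec check stays
// per stream.
#include <cstring>
#include <vector>

struct FakeCodec { int codec_id; };                     // stand-in for AVCodecContext
struct FakeStream { FakeCodec codec; bool is_audio; };  // stand-in for an AVStream
struct FakeFormatContext {
  const char* format_name;  // stand-in for iformat->name
  std::vector<FakeStream> streams;
};

constexpr int kCodecIdOpus = 1;    // placeholder ids, not real AV_CODEC_ID_* values
constexpr int kCodecIdVorbis = 2;

// True if any audio stream would trigger the negative-timestamp fixups.
bool AnyStreamNeedsNegativeTimestampFixups(const FakeFormatContext& ctx) {
  const bool is_ogg = std::strcmp(ctx.format_name, "ogg") == 0;  // hoisted once
  for (const FakeStream& s : ctx.streams) {
    if (!s.is_audio)
      continue;
    if (s.codec.codec_id == kCodecIdOpus ||
        (is_ogg && s.codec.codec_id == kCodecIdVorbis)) {
      return true;
    }
  }
  return false;
}
```
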
| 1349 audio_stream->codec->codec_id == AV_CODEC_ID_VORBIS)) { | |
| 1350 for (size_t i = 0; i < streams_.size(); ++i) { | |
| 1351 if (!streams_[i]) | |
| 1352 continue; | |
| 1353 streams_[i]->enable_negative_timestamp_fixups(); | |
| 1372 | 1354 |
| 1373 // Fixup the seeking information to avoid selecting the audio stream | 1355 // Fixup the seeking information to avoid selecting the audio stream |
| 1374 // simply because it has a lower starting time. | 1356 // simply because it has a lower starting time. |
| 1375 if (streams_[i]->av_stream() == audio_stream && | 1357 if (streams_[i]->av_stream() == audio_stream && |
| 1376 streams_[i]->start_time() < base::TimeDelta()) { | 1358 streams_[i]->start_time() < base::TimeDelta()) { |
| 1377 streams_[i]->set_start_time(base::TimeDelta()); | 1359 streams_[i]->set_start_time(base::TimeDelta()); |
| 1360 } | |
| 1378 } | 1361 } |
| 1379 } | 1362 } |
| 1380 } | 1363 } |
| 1381 | 1364 |
| 1382 // If no start time could be determined, default to zero. | 1365 // If no start time could be determined, default to zero. |
| 1383 if (start_time_ == kInfiniteDuration) { | 1366 if (start_time_ == kInfiniteDuration) { |
| 1384 start_time_ = base::TimeDelta(); | 1367 start_time_ = base::TimeDelta(); |
| 1385 } | 1368 } |
| 1386 | 1369 |
| 1387 // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS | 1370 // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS |
| (...skipping 22 matching lines...) | |
| 1410 // initializing. | 1393 // initializing. |
| 1411 host_->SetDuration(max_duration); | 1394 host_->SetDuration(max_duration); |
| 1412 duration_known_ = (max_duration != kInfiniteDuration); | 1395 duration_known_ = (max_duration != kInfiniteDuration); |
| 1413 | 1396 |
| 1414 int64_t filesize_in_bytes = 0; | 1397 int64_t filesize_in_bytes = 0; |
| 1415 url_protocol_->GetSize(&filesize_in_bytes); | 1398 url_protocol_->GetSize(&filesize_in_bytes); |
| 1416 bitrate_ = CalculateBitrate(format_context, max_duration, filesize_in_bytes); | 1399 bitrate_ = CalculateBitrate(format_context, max_duration, filesize_in_bytes); |
| 1417 if (bitrate_ > 0) | 1400 if (bitrate_ > 0) |
| 1418 data_source_->SetBitrate(bitrate_); | 1401 data_source_->SetBitrate(bitrate_); |
| 1419 | 1402 |
| 1403 LogMetadata(format_context, max_duration); | |
| 1404 media_tracks_updated_cb_.Run(std::move(media_tracks)); | |
| 1405 | |
| 1406 status_cb.Run(PIPELINE_OK); | |
| 1407 } | |
| 1408 | |
| 1409 void FFmpegDemuxer::LogMetadata(AVFormatContext* avctx, | |
| 1410 base::TimeDelta max_duration) { | |
| 1420 // Use a single MediaLogEvent to batch all parameter updates at once; this | 1411 // Use a single MediaLogEvent to batch all parameter updates at once; this |
| 1421 // prevents throttling of events due to the large number of updates here. | 1412 // prevents throttling of events due to the large number of updates here. |
| 1422 std::unique_ptr<MediaLogEvent> metadata_event = | 1413 std::unique_ptr<MediaLogEvent> metadata_event = |
| 1423 media_log_->CreateEvent(MediaLogEvent::PROPERTY_CHANGE); | 1414 media_log_->CreateEvent(MediaLogEvent::PROPERTY_CHANGE); |
| 1424 | 1415 |
| 1425 // Audio logging. | 1416 DCHECK_EQ(avctx->nb_streams, streams_.size()); |
| 1426 metadata_event->params.SetBoolean("found_audio_stream", !!audio_stream); | 1417 auto& params = metadata_event->params; |
| 1427 if (audio_stream) { | 1418 int audio_track_count = 0; |
| 1428 const AVCodecContext* audio_codec = audio_stream->codec; | 1419 int video_track_count = 0; |
| 1429 metadata_event->params.SetString("audio_codec_name", | 1420 for (size_t i = 0; i < streams_.size(); ++i) { |
|
DaleCurtis
2016/08/29 18:15:20
I think some of these keys might be used by MediaI
servolk
2016/08/29 18:38:21
I have found only one place where these params see
DaleCurtis
2016/08/29 18:49:29
It's still necessary, it allows us to break out pi
servolk
2016/08/29 20:25:28
IIUC codecs will never change within tracks, but i
DaleCurtis
2016/08/29 20:29:07
Yeah, that's definitely not going to work today no
servolk
2016/08/29 21:46:24
There is probably no need to change DecoderSelecto
servolk
2016/08/29 22:02:38
Also, I have just looked a bit more into how this
| |
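
The thread above is about keeping the existing MediaLog keys usable while logging more than one track. A short sketch of how the `_trackN` suffixing in the new `LogMetadata()` composes key names; `SuffixedKey` is a hypothetical helper and `std::to_string` stands in for `base::IntToString`:

```cpp
#include <string>

// Sketch of the key-suffixing scheme used in LogMetadata(): the first track of
// each type keeps the bare key, so existing consumers of "audio_codec_name",
// "video_codec_name", etc. keep working; additional tracks get "_trackN".
std::string SuffixedKey(const std::string& base_key, int track_number) {
  if (track_number <= 1)
    return base_key;
  return base_key + "_track" + std::to_string(track_number);
}
```

So a second audio track would be logged under keys such as `audio_codec_name_track2` and `audio_samples_per_second_track2`, matching the suffix construction in the patch.
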
| 1430 GetCodecName(audio_codec)); | 1421 FFmpegDemuxerStream* stream = streams_[i].get(); |
| 1431 metadata_event->params.SetInteger("audio_channels_count", | 1422 if (!stream) |
| 1432 audio_codec->channels); | 1423 continue; |
| 1433 metadata_event->params.SetString( | 1424 if (stream->type() == DemuxerStream::AUDIO) { |
| 1434 "audio_sample_format", | 1425 ++audio_track_count; |
| 1435 SampleFormatToString(audio_config.sample_format())); | 1426 std::string suffix = ""; |
| 1436 metadata_event->params.SetInteger("audio_samples_per_second", | 1427 if (audio_track_count > 1) |
| 1437 audio_config.samples_per_second()); | 1428 suffix = "_track" + base::IntToString(audio_track_count); |
| 1429 const AVCodecContext* audio_codec = avctx->streams[i]->codec; | |
| 1430 const AudioDecoderConfig& audio_config = stream->audio_decoder_config(); | |
| 1431 params.SetString("audio_codec_name" + suffix, GetCodecName(audio_codec)); | |
| 1432 params.SetInteger("audio_channels_count" + suffix, audio_codec->channels); | |
| 1433 params.SetString("audio_sample_format" + suffix, | |
| 1434 SampleFormatToString(audio_config.sample_format())); | |
| 1435 params.SetInteger("audio_samples_per_second" + suffix, | |
| 1436 audio_config.samples_per_second()); | |
| 1437 } else if (stream->type() == DemuxerStream::VIDEO) { | |
| 1438 ++video_track_count; | |
| 1439 std::string suffix = ""; | |
| 1440 if (video_track_count > 1) | |
| 1441 suffix = "_track" + base::IntToString(video_track_count); | |
| 1442 const AVCodecContext* video_codec = avctx->streams[i]->codec; | |
| 1443 const VideoDecoderConfig& video_config = stream->video_decoder_config(); | |
| 1444 params.SetString("video_codec_name" + suffix, GetCodecName(video_codec)); | |
| 1445 params.SetInteger("width" + suffix, video_codec->width); | |
| 1446 params.SetInteger("height" + suffix, video_codec->height); | |
| 1447 params.SetInteger("coded_width" + suffix, video_codec->coded_width); | |
| 1448 params.SetInteger("coded_height" + suffix, video_codec->coded_height); | |
| 1449 params.SetString("time_base" + suffix, | |
| 1450 base::StringPrintf("%d/%d", video_codec->time_base.num, | |
| 1451 video_codec->time_base.den)); | |
| 1452 params.SetString("video_format" + suffix, | |
| 1453 VideoPixelFormatToString(video_config.format())); | |
| 1454 params.SetBoolean("video_is_encrypted" + suffix, | |
| 1455 video_config.is_encrypted()); | |
| 1456 } | |
| 1438 } | 1457 } |
| 1439 | 1458 params.SetBoolean("found_audio_stream", (audio_track_count > 0)); |
| 1440 // Video logging | 1459 params.SetBoolean("found_video_stream", (video_track_count > 0)); |
| 1441 metadata_event->params.SetBoolean("found_video_stream", !!video_stream); | |
| 1442 if (video_stream) { | |
| 1443 const AVCodecContext* video_codec = video_stream->codec; | |
| 1444 metadata_event->params.SetString("video_codec_name", | |
| 1445 GetCodecName(video_codec)); | |
| 1446 metadata_event->params.SetInteger("width", video_codec->width); | |
| 1447 metadata_event->params.SetInteger("height", video_codec->height); | |
| 1448 metadata_event->params.SetInteger("coded_width", video_codec->coded_width); | |
| 1449 metadata_event->params.SetInteger("coded_height", | |
| 1450 video_codec->coded_height); | |
| 1451 metadata_event->params.SetString( | |
| 1452 "time_base", base::StringPrintf("%d/%d", video_codec->time_base.num, | |
| 1453 video_codec->time_base.den)); | |
| 1454 metadata_event->params.SetString( | |
| 1455 "video_format", VideoPixelFormatToString(video_config.format())); | |
| 1456 metadata_event->params.SetBoolean("video_is_encrypted", | |
| 1457 video_config.is_encrypted()); | |
| 1458 } | |
| 1459 | |
| 1460 SetTimeProperty(metadata_event.get(), "max_duration", max_duration); | 1460 SetTimeProperty(metadata_event.get(), "max_duration", max_duration); |
| 1461 SetTimeProperty(metadata_event.get(), "start_time", start_time_); | 1461 SetTimeProperty(metadata_event.get(), "start_time", start_time_); |
| 1462 metadata_event->params.SetInteger("bitrate", bitrate_); | 1462 metadata_event->params.SetInteger("bitrate", bitrate_); |
| 1463 media_log_->AddEvent(std::move(metadata_event)); | 1463 media_log_->AddEvent(std::move(metadata_event)); |
| 1464 | |
| 1465 media_tracks_updated_cb_.Run(std::move(media_tracks)); | |
| 1466 | |
| 1467 status_cb.Run(PIPELINE_OK); | |
| 1468 } | 1464 } |
| 1469 | 1465 |
| 1470 FFmpegDemuxerStream* FFmpegDemuxer::FindPreferredStreamForSeeking( | 1466 FFmpegDemuxerStream* FFmpegDemuxer::FindPreferredStreamForSeeking( |
| 1471 base::TimeDelta seek_time) { | 1467 base::TimeDelta seek_time) { |
| 1472 // If we have a selected/enabled video stream and its start time is lower | 1468 // If we have a selected/enabled video stream and its start time is lower |
| 1473 // than the |seek_time| or unknown, then always prefer it for seeking. | 1469 // than the |seek_time| or unknown, then always prefer it for seeking. |
| 1474 FFmpegDemuxerStream* video_stream = nullptr; | 1470 FFmpegDemuxerStream* video_stream = nullptr; |
| 1475 for (const auto& stream : streams_) { | 1471 for (const auto& stream : streams_) { |
| 1476 if (stream && stream->type() == DemuxerStream::VIDEO && stream->enabled()) { | 1472 if (stream && stream->type() == DemuxerStream::VIDEO && stream->enabled()) { |
| 1477 video_stream = stream; | 1473 video_stream = stream.get(); |
| 1478 if (video_stream->start_time() == kNoTimestamp || | 1474 if (video_stream->start_time() == kNoTimestamp || |
| 1479 video_stream->start_time() <= seek_time) { | 1475 video_stream->start_time() <= seek_time) { |
| 1480 return stream; | 1476 return video_stream; |
| 1481 } | 1477 } |
| 1482 break; | 1478 break; |
| 1483 } | 1479 } |
| 1484 } | 1480 } |
| 1485 | 1481 |
| 1486 // If video stream is not present or |seek_time| is lower than the video start | 1482 // If video stream is not present or |seek_time| is lower than the video start |
| 1487 // time, then try to find an enabled stream with the lowest start time. | 1483 // time, then try to find an enabled stream with the lowest start time. |
| 1488 FFmpegDemuxerStream* lowest_start_time_stream = nullptr; | 1484 FFmpegDemuxerStream* lowest_start_time_stream = nullptr; |
| 1489 for (const auto& stream : streams_) { | 1485 for (const auto& stream : streams_) { |
| 1490 if (!stream || !stream->enabled() || stream->start_time() == kNoTimestamp) | 1486 if (!stream || !stream->enabled() || stream->start_time() == kNoTimestamp) |
| 1491 continue; | 1487 continue; |
| 1492 if (!lowest_start_time_stream || | 1488 if (!lowest_start_time_stream || |
| 1493 stream->start_time() < lowest_start_time_stream->start_time()) { | 1489 stream->start_time() < lowest_start_time_stream->start_time()) { |
| 1494 lowest_start_time_stream = stream; | 1490 lowest_start_time_stream = stream.get(); |
| 1495 } | 1491 } |
| 1496 } | 1492 } |
| 1497 // If we found a stream with start time lower than |seek_time|, then use it. | 1493 // If we found a stream with start time lower than |seek_time|, then use it. |
| 1498 if (lowest_start_time_stream && | 1494 if (lowest_start_time_stream && |
| 1499 lowest_start_time_stream->start_time() <= seek_time) { | 1495 lowest_start_time_stream->start_time() <= seek_time) { |
| 1500 return lowest_start_time_stream; | 1496 return lowest_start_time_stream; |
| 1501 } | 1497 } |
| 1502 | 1498 |
| 1503 // If we couldn't find any streams with the start time lower than |seek_time| | 1499 // If we couldn't find any streams with the start time lower than |seek_time| |
| 1504 // then use either video (if one exists) or any audio stream. | 1500 // then use either video (if one exists) or any audio stream. |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 1518 } | 1514 } |
| 1519 | 1515 |
| 1520 if (result < 0) { | 1516 if (result < 0) { |
| 1521 // Use VLOG(1) instead of NOTIMPLEMENTED() to prevent the message being | 1517 // Use VLOG(1) instead of NOTIMPLEMENTED() to prevent the message being |
| 1522 // captured from stdout and contaminates testing. | 1518 // captured from stdout and contaminates testing. |
| 1523 // TODO(scherkus): Implement this properly and signal error (BUG=23447). | 1519 // TODO(scherkus): Implement this properly and signal error (BUG=23447). |
| 1524 VLOG(1) << "Not implemented"; | 1520 VLOG(1) << "Not implemented"; |
| 1525 } | 1521 } |
| 1526 | 1522 |
| 1527 // Tell streams to flush buffers due to seeking. | 1523 // Tell streams to flush buffers due to seeking. |
| 1528 StreamVector::iterator iter; | 1524 for (const auto& stream : streams_) { |
| 1529 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { | 1525 if (stream) |
| 1530 if (*iter) | 1526 stream->FlushBuffers(); |
| 1531 (*iter)->FlushBuffers(); | |
| 1532 } | 1527 } |
| 1533 | 1528 |
| 1534 // Resume reading until capacity. | 1529 // Resume reading until capacity. |
| 1535 ReadFrameIfNeeded(); | 1530 ReadFrameIfNeeded(); |
| 1536 | 1531 |
| 1537 // Notify we're finished seeking. | 1532 // Notify we're finished seeking. |
| 1538 cb.Run(PIPELINE_OK); | 1533 cb.Run(PIPELINE_OK); |
| 1539 } | 1534 } |
| 1540 | 1535 |
| 1541 void FFmpegDemuxer::OnEnabledAudioTracksChanged( | 1536 void FFmpegDemuxer::OnEnabledAudioTracksChanged( |
| 1542 const std::vector<MediaTrack::Id>& track_ids, | 1537 const std::vector<MediaTrack::Id>& track_ids, |
| 1543 base::TimeDelta currTime) { | 1538 base::TimeDelta currTime) { |
| 1544 DCHECK(task_runner_->BelongsToCurrentThread()); | 1539 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 1545 bool enabled = false; | 1540 |
| 1546 DemuxerStream* audio_stream = GetStream(DemuxerStream::AUDIO); | 1541 std::set<DemuxerStream*> enabled_streams; |
| 1547 CHECK(audio_stream); | 1542 for (const auto& id : track_ids) { |
| 1548 if (track_ids.size() > 0) { | 1543 DemuxerStream* stream = track_id_to_demux_stream_map_[id]; |
| 1549 DCHECK(track_id_to_demux_stream_map_[track_ids[0]] == audio_stream); | 1544 DCHECK(stream); |
| 1550 enabled = true; | 1545 DCHECK_EQ(DemuxerStream::AUDIO, stream->type()); |
| 1546 enabled_streams.insert(stream); | |
|
DaleCurtis
2016/08/29 18:15:20
Is there a more efficient insert here? This can ca
servolk
2016/08/29 18:38:20
I think we don't need to be concerned about perfor
| |
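
The question above is about the cost of the `std::set` insert. Purely as an illustration of the same disable-then-enable ordering, here is a sketch against stand-in types that uses a small vector plus `std::find` instead of a set; it is not the patch's implementation:

```cpp
#include <algorithm>
#include <vector>

// Stand-in for FFmpegDemuxerStream, reduced to what the sketch needs.
struct FakeStream {
  bool is_audio = true;
  bool enabled = true;
  void SetEnabled(bool on) { enabled = on; }
};

// Two-pass update: disable every audio stream that is not in the requested
// set, then enable the requested ones. For the handful of tracks involved, a
// linear std::find over a small vector is an alternative to std::set lookup.
void UpdateEnabledAudioStreams(const std::vector<FakeStream*>& all_streams,
                               const std::vector<FakeStream*>& enabled_streams) {
  for (FakeStream* s : all_streams) {
    if (s && s->is_audio &&
        std::find(enabled_streams.begin(), enabled_streams.end(), s) ==
            enabled_streams.end()) {
      s->SetEnabled(false);
    }
  }
  for (FakeStream* s : enabled_streams)
    s->SetEnabled(true);
}
```
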
| 1551 } | 1547 } |
| 1552 DVLOG(1) << __func__ << ": " << (enabled ? "enabling" : "disabling") | 1548 |
| 1553 << " audio stream"; | 1549 // First disable all streams that need to be disabled and then enable streams |
| 1554 audio_stream->set_enabled(enabled, currTime); | 1550 // that are enabled. |
| 1551 for (const auto& stream : streams_) { | |
| 1552 if (stream->type() == DemuxerStream::AUDIO && | |
| 1553 enabled_streams.find(stream.get()) == enabled_streams.end()) { | |
| 1554 DVLOG(1) << __func__ << ": disabling stream " << stream.get(); | |
| 1555 stream->set_enabled(false, currTime); | |
| 1556 } | |
| 1557 } | |
| 1558 for (const auto& stream : enabled_streams) { | |
| 1559 DVLOG(1) << __func__ << ": enabling stream " << stream; | |
| 1560 stream->set_enabled(true, currTime); | |
| 1561 } | |
| 1555 } | 1562 } |
| 1556 | 1563 |
| 1557 void FFmpegDemuxer::OnSelectedVideoTrackChanged( | 1564 void FFmpegDemuxer::OnSelectedVideoTrackChanged( |
| 1558 const std::vector<MediaTrack::Id>& track_ids, | 1565 const std::vector<MediaTrack::Id>& track_ids, |
| 1559 base::TimeDelta currTime) { | 1566 base::TimeDelta currTime) { |
| 1560 DCHECK(task_runner_->BelongsToCurrentThread()); | 1567 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 1561 bool enabled = false; | 1568 DCHECK_LE(track_ids.size(), 1u); |
| 1562 DemuxerStream* video_stream = GetStream(DemuxerStream::VIDEO); | 1569 |
| 1563 CHECK(video_stream); | 1570 DemuxerStream* selected_stream = nullptr; |
| 1564 if (track_ids.size() > 0) { | 1571 if (!track_ids.empty()) { |
| 1565 DCHECK(track_id_to_demux_stream_map_[track_ids[0]] == video_stream); | 1572 selected_stream = track_id_to_demux_stream_map_[track_ids[0]]; |
| 1566 enabled = true; | 1573 DCHECK(selected_stream); |
| 1574 DCHECK_EQ(DemuxerStream::VIDEO, selected_stream->type()); | |
| 1567 } | 1575 } |
| 1568 DVLOG(1) << __func__ << ": " << (enabled ? "enabling" : "disabling") | 1576 |
| 1569 << " video stream"; | 1577 // First disable all streams that need to be disabled and then enable the |
| 1570 video_stream->set_enabled(enabled, currTime); | 1578 // stream that needs to be enabled (if any). |
| 1579 for (const auto& stream : streams_) { | |
| 1580 if (stream->type() == DemuxerStream::VIDEO && | |
| 1581 stream.get() != selected_stream) { | |
| 1582 DVLOG(1) << __func__ << ": disabling stream " << stream.get(); | |
| 1583 stream->set_enabled(false, currTime); | |
| 1584 } | |
| 1585 } | |
| 1586 if (selected_stream) { | |
| 1587 DVLOG(1) << __func__ << ": enabling stream " << selected_stream; | |
| 1588 selected_stream->set_enabled(true, currTime); | |
| 1589 } | |
| 1571 } | 1590 } |
| 1572 | 1591 |
| 1573 void FFmpegDemuxer::ReadFrameIfNeeded() { | 1592 void FFmpegDemuxer::ReadFrameIfNeeded() { |
| 1574 DCHECK(task_runner_->BelongsToCurrentThread()); | 1593 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 1575 | 1594 |
| 1576 // Make sure we have work to do before reading. | 1595 // Make sure we have work to do before reading. |
| 1577 if (!blocking_thread_.IsRunning() || !StreamsHaveAvailableCapacity() || | 1596 if (!blocking_thread_.IsRunning() || !StreamsHaveAvailableCapacity() || |
| 1578 pending_read_ || pending_seek_) { | 1597 pending_read_ || pending_seek_) { |
| 1579 return; | 1598 return; |
| 1580 } | 1599 } |
| (...skipping 27 matching lines...) | |
| 1608 // - either underlying ffmpeg returned an error | 1627 // - either underlying ffmpeg returned an error |
| 1609 // - or FFMpegDemuxer reached the maximum allowed memory usage. | 1628 // - or FFMpegDemuxer reached the maximum allowed memory usage. |
| 1610 if (result < 0 || IsMaxMemoryUsageReached()) { | 1629 if (result < 0 || IsMaxMemoryUsageReached()) { |
| 1611 LOG(ERROR) << __func__ << " result=" << result | 1630 LOG(ERROR) << __func__ << " result=" << result |
| 1612 << " IsMaxMemoryUsageReached=" << IsMaxMemoryUsageReached(); | 1631 << " IsMaxMemoryUsageReached=" << IsMaxMemoryUsageReached(); |
| 1613 // Update the duration based on the highest elapsed time across all streams | 1632 // Update the duration based on the highest elapsed time across all streams |
| 1614 // if it was previously unknown. | 1633 // if it was previously unknown. |
| 1615 if (!duration_known_) { | 1634 if (!duration_known_) { |
| 1616 base::TimeDelta max_duration; | 1635 base::TimeDelta max_duration; |
| 1617 | 1636 |
| 1618 for (StreamVector::iterator iter = streams_.begin(); | 1637 for (const auto& stream : streams_) { |
| 1619 iter != streams_.end(); | 1638 if (!stream) |
| 1620 ++iter) { | |
| 1621 if (!*iter) | |
| 1622 continue; | 1639 continue; |
| 1623 | 1640 |
| 1624 base::TimeDelta duration = (*iter)->GetElapsedTime(); | 1641 base::TimeDelta duration = stream->GetElapsedTime(); |
| 1625 if (duration != kNoTimestamp && duration > max_duration) | 1642 if (duration != kNoTimestamp && duration > max_duration) |
| 1626 max_duration = duration; | 1643 max_duration = duration; |
| 1627 } | 1644 } |
| 1628 | 1645 |
| 1629 if (max_duration > base::TimeDelta()) { | 1646 if (max_duration > base::TimeDelta()) { |
| 1630 host_->SetDuration(max_duration); | 1647 host_->SetDuration(max_duration); |
| 1631 duration_known_ = true; | 1648 duration_known_ = true; |
| 1632 } | 1649 } |
| 1633 } | 1650 } |
| 1634 // If we have reached the end of stream, tell the downstream filters about | 1651 // If we have reached the end of stream, tell the downstream filters about |
| (...skipping 14 matching lines...) | |
| 1649 // when av_read_frame() returns success code. See bug comment for ideas: | 1666 // when av_read_frame() returns success code. See bug comment for ideas: |
| 1650 // | 1667 // |
| 1651 // https://code.google.com/p/chromium/issues/detail?id=169133#c10 | 1668 // https://code.google.com/p/chromium/issues/detail?id=169133#c10 |
| 1652 if (!packet->data) { | 1669 if (!packet->data) { |
| 1653 ScopedAVPacket new_packet(new AVPacket()); | 1670 ScopedAVPacket new_packet(new AVPacket()); |
| 1654 av_new_packet(new_packet.get(), 0); | 1671 av_new_packet(new_packet.get(), 0); |
| 1655 av_packet_copy_props(new_packet.get(), packet.get()); | 1672 av_packet_copy_props(new_packet.get(), packet.get()); |
| 1656 packet.swap(new_packet); | 1673 packet.swap(new_packet); |
| 1657 } | 1674 } |
| 1658 | 1675 |
| 1659 FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index]; | 1676 FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index].get(); |
| 1660 if (demuxer_stream->enabled()) | 1677 if (demuxer_stream->enabled()) |
| 1661 demuxer_stream->EnqueuePacket(std::move(packet)); | 1678 demuxer_stream->EnqueuePacket(std::move(packet)); |
| 1662 } | 1679 } |
| 1663 | 1680 |
| 1664 // Keep reading until we've reached capacity. | 1681 // Keep reading until we've reached capacity. |
| 1665 ReadFrameIfNeeded(); | 1682 ReadFrameIfNeeded(); |
| 1666 } | 1683 } |
| 1667 | 1684 |
| 1668 bool FFmpegDemuxer::StreamsHaveAvailableCapacity() { | 1685 bool FFmpegDemuxer::StreamsHaveAvailableCapacity() { |
| 1669 DCHECK(task_runner_->BelongsToCurrentThread()); | 1686 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 1670 StreamVector::iterator iter; | 1687 for (const auto& stream : streams_) { |
| 1671 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { | 1688 if (stream && stream->HasAvailableCapacity()) |
| 1672 if (*iter && (*iter)->HasAvailableCapacity()) { | |
| 1673 return true; | 1689 return true; |
| 1674 } | |
| 1675 } | 1690 } |
| 1676 return false; | 1691 return false; |
| 1677 } | 1692 } |
| 1678 | 1693 |
| 1679 bool FFmpegDemuxer::IsMaxMemoryUsageReached() const { | 1694 bool FFmpegDemuxer::IsMaxMemoryUsageReached() const { |
| 1680 DCHECK(task_runner_->BelongsToCurrentThread()); | 1695 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 1681 | 1696 |
| 1682 // Max allowed memory usage, all streams combined. | 1697 // Max allowed memory usage, all streams combined. |
| 1683 const size_t kDemuxerMemoryLimit = 150 * 1024 * 1024; | 1698 const size_t kDemuxerMemoryLimit = 150 * 1024 * 1024; |
| 1684 | 1699 |
| 1685 size_t memory_left = kDemuxerMemoryLimit; | 1700 size_t memory_left = kDemuxerMemoryLimit; |
| 1686 for (StreamVector::const_iterator iter = streams_.begin(); | 1701 for (const auto& stream : streams_) { |
| 1687 iter != streams_.end(); ++iter) { | 1702 if (!stream) |
| 1688 if (!(*iter)) | |
| 1689 continue; | 1703 continue; |
| 1690 | 1704 |
| 1691 size_t stream_memory_usage = (*iter)->MemoryUsage(); | 1705 size_t stream_memory_usage = stream->MemoryUsage(); |
| 1692 if (stream_memory_usage > memory_left) | 1706 if (stream_memory_usage > memory_left) |
| 1693 return true; | 1707 return true; |
| 1694 memory_left -= stream_memory_usage; | 1708 memory_left -= stream_memory_usage; |
| 1695 } | 1709 } |
| 1696 return false; | 1710 return false; |
| 1697 } | 1711 } |
| 1698 | 1712 |
| 1699 void FFmpegDemuxer::StreamHasEnded() { | 1713 void FFmpegDemuxer::StreamHasEnded() { |
| 1700 DCHECK(task_runner_->BelongsToCurrentThread()); | 1714 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 1701 StreamVector::iterator iter; | 1715 for (const auto& stream : streams_) { |
| 1702 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { | 1716 if (stream) |
| 1703 if (!*iter) | 1717 stream->SetEndOfStream(); |
| 1704 continue; | |
| 1705 (*iter)->SetEndOfStream(); | |
| 1706 } | 1718 } |
| 1707 } | 1719 } |
| 1708 | 1720 |
| 1709 void FFmpegDemuxer::OnDataSourceError() { | 1721 void FFmpegDemuxer::OnDataSourceError() { |
| 1710 MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": data source error"; | 1722 MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": data source error"; |
| 1711 host_->OnDemuxerError(PIPELINE_ERROR_READ); | 1723 host_->OnDemuxerError(PIPELINE_ERROR_READ); |
| 1712 } | 1724 } |
| 1713 | 1725 |
| 1714 void FFmpegDemuxer::SetLiveness(DemuxerStream::Liveness liveness) { | 1726 void FFmpegDemuxer::SetLiveness(DemuxerStream::Liveness liveness) { |
| 1715 DCHECK(task_runner_->BelongsToCurrentThread()); | 1727 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 1716 for (auto* stream : streams_) { | 1728 for (const auto& stream : streams_) { |
| 1717 if (stream) | 1729 if (stream) |
| 1718 stream->SetLiveness(liveness); | 1730 stream->SetLiveness(liveness); |
| 1719 } | 1731 } |
| 1720 } | 1732 } |
| 1721 | 1733 |
| 1722 } // namespace media | 1734 } // namespace media |
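
For reference, the `std::move()` into `streams_[i]` and the `.get()` calls at the map and host call sites imply that `StreamVector` now owns its streams through `std::unique_ptr`, with raw `FFmpegDemuxerStream*` handed out only as non-owning aliases. The declaration below is inferred from this diff; the real typedef lives in ffmpeg_demuxer.h, which is not part of this section:

```cpp
#include <memory>
#include <vector>

class FFmpegDemuxerStream;  // declared in media/filters/ffmpeg_demuxer.h

// Inferred ownership model: the demuxer owns the streams; everything else
// (track maps, DemuxerHost, seek helpers) sees non-owning raw pointers.
using StreamVector = std::vector<std::unique_ptr<FFmpegDemuxerStream>>;

// Typical call-site pattern from the patch:
//   streams_[i] = std::move(demuxer_stream);                  // take ownership
//   track_id_to_demux_stream_map_[id] = streams_[i].get();    // non-owning alias
```
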