| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/ffmpeg_demuxer.h" | 5 #include "media/filters/ffmpeg_demuxer.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <memory> | 8 #include <memory> |
| 9 #include <set> | 9 #include <set> |
| 10 #include <utility> | 10 #include <utility> |
| (...skipping 36 matching lines...) |
| 47 | 47 |
| 48 namespace media { | 48 namespace media { |
| 49 | 49 |
| 50 static base::Time ExtractTimelineOffset(AVFormatContext* format_context) { | 50 static base::Time ExtractTimelineOffset(AVFormatContext* format_context) { |
| 51 if (strstr(format_context->iformat->name, "webm") || | 51 if (strstr(format_context->iformat->name, "webm") || |
| 52 strstr(format_context->iformat->name, "matroska")) { | 52 strstr(format_context->iformat->name, "matroska")) { |
| 53 const AVDictionaryEntry* entry = | 53 const AVDictionaryEntry* entry = |
| 54 av_dict_get(format_context->metadata, "creation_time", NULL, 0); | 54 av_dict_get(format_context->metadata, "creation_time", NULL, 0); |
| 55 | 55 |
| 56 base::Time timeline_offset; | 56 base::Time timeline_offset; |
| 57 |
| 58 // FFmpegDemuxerTests assume base::Time::FromUTCString() is used here. |
| 57 if (entry != NULL && entry->value != NULL && | 59 if (entry != NULL && entry->value != NULL && |
| 58 FFmpegUTCDateToTime(entry->value, &timeline_offset)) { | 60 base::Time::FromUTCString(entry->value, &timeline_offset)) { |
| 59 return timeline_offset; | 61 return timeline_offset; |
| 60 } | 62 } |
| 61 } | 63 } |
| 62 | 64 |
| 63 return base::Time(); | 65 return base::Time(); |
| 64 } | 66 } |
| 65 | 67 |
| 66 static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) { | 68 static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) { |
| 67 return base::TimeDelta::FromMicroseconds( | 69 return base::TimeDelta::FromMicroseconds( |
| 68 frames * base::Time::kMicrosecondsPerSecond / sample_rate); | 70 frames * base::Time::kMicrosecondsPerSecond / sample_rate); |
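
FramesToTimeDelta() above is untouched by this change; it simply scales an audio frame count by the sample rate to get a microsecond duration. A minimal standalone sketch of the same arithmetic in plain std::chrono (illustrative only, not Chromium code):

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>

// Same arithmetic as FramesToTimeDelta(): frames / sample_rate, expressed in
// microseconds (truncated toward zero).
std::chrono::microseconds FramesToDuration(int64_t frames, double sample_rate) {
  return std::chrono::microseconds(
      static_cast<int64_t>(frames * 1000000.0 / sample_rate));
}

int main() {
  std::cout << FramesToDuration(480, 48000.0).count() << " us\n";   // 10000
  std::cout << FramesToDuration(1024, 44100.0).count() << " us\n";  // 23219
}
```
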
| (...skipping 103 matching lines...) |
| 172 | 174 |
| 173 // Note the PRESUBMIT_IGNORE_UMA_MAX below, this silences the PRESUBMIT.py | 175 // Note the PRESUBMIT_IGNORE_UMA_MAX below, this silences the PRESUBMIT.py |
| 174 // check for uma enum max usage, since we're abusing | 176 // check for uma enum max usage, since we're abusing |
| 175 // UMA_HISTOGRAM_ENUMERATION to report a discrete value. | 177 // UMA_HISTOGRAM_ENUMERATION to report a discrete value. |
| 176 UMA_HISTOGRAM_ENUMERATION("Media.VideoColorRange", color_range, | 178 UMA_HISTOGRAM_ENUMERATION("Media.VideoColorRange", color_range, |
| 177 AVCOL_RANGE_NB); // PRESUBMIT_IGNORE_UMA_MAX | 179 AVCOL_RANGE_NB); // PRESUBMIT_IGNORE_UMA_MAX |
| 178 } | 180 } |
| 179 | 181 |
| 180 static const char kCodecNone[] = "none"; | 182 static const char kCodecNone[] = "none"; |
| 181 | 183 |
| 182 static const char* GetCodecName(const AVCodecContext* context) { | 184 static const char* GetCodecName(enum AVCodecID id) { |
| 183 if (context->codec_descriptor) | 185 const AVCodecDescriptor* codec_descriptor = avcodec_descriptor_get(id); |
| 184 return context->codec_descriptor->name; | |
| 185 const AVCodecDescriptor* codec_descriptor = | |
| 186 avcodec_descriptor_get(context->codec_id); | |
| 187 // If the codec name can't be determined, return none for tracking. | 186 // If the codec name can't be determined, return none for tracking. |
| 188 return codec_descriptor ? codec_descriptor->name : kCodecNone; | 187 return codec_descriptor ? codec_descriptor->name : kCodecNone; |
| 189 } | 188 } |
| 190 | 189 |
| 191 static void SetTimeProperty(MediaLogEvent* event, | 190 static void SetTimeProperty(MediaLogEvent* event, |
| 192 const std::string& key, | 191 const std::string& key, |
| 193 base::TimeDelta value) { | 192 base::TimeDelta value) { |
| 194 if (value == kInfiniteDuration) | 193 if (value == kInfiniteDuration) |
| 195 event->params.SetString(key, "kInfiniteDuration"); | 194 event->params.SetString(key, "kInfiniteDuration"); |
| 196 else if (value == kNoTimestamp) | 195 else if (value == kNoTimestamp) |
| 197 event->params.SetString(key, "kNoTimestamp"); | 196 event->params.SetString(key, "kNoTimestamp"); |
| 198 else | 197 else |
| 199 event->params.SetDouble(key, value.InSecondsF()); | 198 event->params.SetDouble(key, value.InSecondsF()); |
| 200 } | 199 } |
| 201 | 200 |
| 202 std::unique_ptr<FFmpegDemuxerStream> FFmpegDemuxerStream::Create( | 201 std::unique_ptr<FFmpegDemuxerStream> FFmpegDemuxerStream::Create( |
| 203 FFmpegDemuxer* demuxer, | 202 FFmpegDemuxer* demuxer, |
| 204 AVStream* stream, | 203 AVStream* stream, |
| 205 const scoped_refptr<MediaLog>& media_log) { | 204 const scoped_refptr<MediaLog>& media_log) { |
| 206 if (!demuxer || !stream) | 205 if (!demuxer || !stream) |
| 207 return nullptr; | 206 return nullptr; |
| 208 | 207 |
| 209 std::unique_ptr<FFmpegDemuxerStream> demuxer_stream; | 208 std::unique_ptr<FFmpegDemuxerStream> demuxer_stream; |
| 210 std::unique_ptr<AudioDecoderConfig> audio_config; | 209 std::unique_ptr<AudioDecoderConfig> audio_config; |
| 211 std::unique_ptr<VideoDecoderConfig> video_config; | 210 std::unique_ptr<VideoDecoderConfig> video_config; |
| 212 | 211 |
| 213 if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) { | 212 if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { |
| 214 audio_config.reset(new AudioDecoderConfig()); | 213 audio_config.reset(new AudioDecoderConfig()); |
| 215 | 214 |
| 216 // IsValidConfig() checks that the codec is supported and that the channel | 215 // IsValidConfig() checks that the codec is supported and that the channel |
| 217 // layout and sample format are valid. | 216 // layout and sample format are valid. |
| 218 // | 217 // |
| 219 // TODO(chcunningham): Change AVStreamToAudioDecoderConfig to check | 218 // TODO(chcunningham): Change AVStreamToAudioDecoderConfig to check |
| 220 // IsValidConfig internally and return a null scoped_ptr if not valid. | 219 // IsValidConfig internally and return a null scoped_ptr if not valid. |
| 221 if (!AVStreamToAudioDecoderConfig(stream, audio_config.get()) || | 220 if (!AVStreamToAudioDecoderConfig(stream, audio_config.get()) || |
| 222 !audio_config->IsValidConfig()) { | 221 !audio_config->IsValidConfig()) { |
| 223 MEDIA_LOG(ERROR, media_log) | 222 MEDIA_LOG(ERROR, media_log) |
| 224 << "FFmpegDemuxer: failed creating audio stream"; | 223 << "FFmpegDemuxer: failed creating audio stream"; |
| 225 return nullptr; | 224 return nullptr; |
| 226 } | 225 } |
| 227 | 226 |
| 228 MEDIA_LOG(INFO, media_log) << "FFmpegDemuxer: created audio stream, config " | 227 MEDIA_LOG(INFO, media_log) << "FFmpegDemuxer: created audio stream, config " |
| 229 << audio_config->AsHumanReadableString(); | 228 << audio_config->AsHumanReadableString(); |
| 230 } else if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO) { | 229 } else if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { |
| 231 video_config.reset(new VideoDecoderConfig()); | 230 video_config.reset(new VideoDecoderConfig()); |
| 232 | 231 |
| 233 // IsValidConfig() checks that the codec is supported and that the channel | 232 // IsValidConfig() checks that the codec is supported and that the channel |
| 234 // layout and sample format are valid. | 233 // layout and sample format are valid. |
| 235 // | 234 // |
| 236 // TODO(chcunningham): Change AVStreamToVideoDecoderConfig to check | 235 // TODO(chcunningham): Change AVStreamToVideoDecoderConfig to check |
| 237 // IsValidConfig internally and return a null scoped_ptr if not valid. | 236 // IsValidConfig internally and return a null scoped_ptr if not valid. |
| 238 if (!AVStreamToVideoDecoderConfig(stream, video_config.get()) || | 237 if (!AVStreamToVideoDecoderConfig(stream, video_config.get()) || |
| 239 !video_config->IsValidConfig()) { | 238 !video_config->IsValidConfig()) { |
| 240 MEDIA_LOG(ERROR, media_log) | 239 MEDIA_LOG(ERROR, media_log) |
| (...skipping 36 matching lines...) |
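
The edits above, like most of this CL, replace the deprecated `AVStream::codec` (a full `AVCodecContext`) with `AVStream::codecpar` (`AVCodecParameters`), which only carries stream properties. When a caller still needs a real decoder context, the usual FFmpeg recipe is to build one from the parameters. A hedged sketch of that pattern (error handling trimmed, not code from this CL):

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

// Build a decoder context from demuxer-provided stream parameters. This is
// the standard replacement for reading fields straight off stream->codec.
AVCodecContext* MakeDecoderContext(const AVStream* stream) {
  const AVCodec* codec = avcodec_find_decoder(stream->codecpar->codec_id);
  if (!codec)
    return nullptr;
  AVCodecContext* context = avcodec_alloc_context3(codec);
  if (!context)
    return nullptr;
  if (avcodec_parameters_to_context(context, stream->codecpar) < 0 ||
      avcodec_open2(context, codec, nullptr) < 0) {
    avcodec_free_context(&context);
    return nullptr;
  }
  return context;  // Caller releases with avcodec_free_context().
}
```

`avcodec_parameters_to_context()` arrived in the same FFmpeg release (3.1) that introduced `codecpar`, so it is available wherever this migration applies.
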
| 277 is_enabled_(true), | 276 is_enabled_(true), |
| 278 waiting_for_keyframe_(false), | 277 waiting_for_keyframe_(false), |
| 279 fixup_negative_timestamps_(false) { | 278 fixup_negative_timestamps_(false) { |
| 280 DCHECK(demuxer_); | 279 DCHECK(demuxer_); |
| 281 | 280 |
| 282 bool is_encrypted = false; | 281 bool is_encrypted = false; |
| 283 int rotation = 0; | 282 int rotation = 0; |
| 284 AVDictionaryEntry* rotation_entry = NULL; | 283 AVDictionaryEntry* rotation_entry = NULL; |
| 285 | 284 |
| 286 // Determine our media format. | 285 // Determine our media format. |
| 287 switch (stream->codec->codec_type) { | 286 switch (stream->codecpar->codec_type) { |
| 288 case AVMEDIA_TYPE_AUDIO: | 287 case AVMEDIA_TYPE_AUDIO: |
| 289 DCHECK(audio_config_.get() && !video_config_.get()); | 288 DCHECK(audio_config_.get() && !video_config_.get()); |
| 290 type_ = AUDIO; | 289 type_ = AUDIO; |
| 291 is_encrypted = audio_config_->is_encrypted(); | 290 is_encrypted = audio_config_->is_encrypted(); |
| 292 break; | 291 break; |
| 293 case AVMEDIA_TYPE_VIDEO: | 292 case AVMEDIA_TYPE_VIDEO: |
| 294 DCHECK(video_config_.get() && !audio_config_.get()); | 293 DCHECK(video_config_.get() && !audio_config_.get()); |
| 295 type_ = VIDEO; | 294 type_ = VIDEO; |
| 296 is_encrypted = video_config_->is_encrypted(); | 295 is_encrypted = video_config_->is_encrypted(); |
| 297 | 296 |
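
The `rotation` and `rotation_entry` locals above feed a lookup of the stream's "rotate" metadata entry in the VIDEO branch, most of which is elided here. A hedged sketch of that kind of dictionary lookup (illustrative names, not the CL's exact code):

```cpp
extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/dict.h>
}
#include <cstdlib>

// Read the container-provided rotation hint, if any, as degrees clockwise.
int StreamRotationDegrees(const AVStream* stream) {
  const AVDictionaryEntry* entry =
      av_dict_get(stream->metadata, "rotate", nullptr, 0);
  if (!entry || !entry->value)
    return 0;
  return std::atoi(entry->value);  // Typically "90", "180" or "270".
}
```
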
| (...skipping 194 matching lines...) |
| 492 if (start_time > base::TimeDelta()) | 491 if (start_time > base::TimeDelta()) |
| 493 start_time = base::TimeDelta(); | 492 start_time = base::TimeDelta(); |
| 494 | 493 |
| 495 buffer->set_timestamp(stream_timestamp - start_time); | 494 buffer->set_timestamp(stream_timestamp - start_time); |
| 496 | 495 |
| 497 // If enabled, and no codec delay is present, mark audio packets with | 496 // If enabled, and no codec delay is present, mark audio packets with |
| 498 // negative timestamps for post-decode discard. | 497 // negative timestamps for post-decode discard. |
| 499 if (fixup_negative_timestamps_ && is_audio && | 498 if (fixup_negative_timestamps_ && is_audio && |
| 500 stream_timestamp < base::TimeDelta() && | 499 stream_timestamp < base::TimeDelta() && |
| 501 buffer->duration() != kNoTimestamp) { | 500 buffer->duration() != kNoTimestamp) { |
| 502 if (!stream_->codec->delay) { | 501 if (!audio_decoder_config().codec_delay()) { |
| 503 DCHECK_EQ(buffer->discard_padding().first, base::TimeDelta()); | 502 DCHECK_EQ(buffer->discard_padding().first, base::TimeDelta()); |
| 504 | 503 |
| 505 if (stream_timestamp + buffer->duration() < base::TimeDelta()) { | 504 if (stream_timestamp + buffer->duration() < base::TimeDelta()) { |
| 506 DCHECK_EQ(buffer->discard_padding().second, base::TimeDelta()); | 505 DCHECK_EQ(buffer->discard_padding().second, base::TimeDelta()); |
| 507 | 506 |
| 508 // Discard the entire packet if it's entirely before zero. | 507 // Discard the entire packet if it's entirely before zero. |
| 509 buffer->set_discard_padding( | 508 buffer->set_discard_padding( |
| 510 std::make_pair(kInfiniteDuration, base::TimeDelta())); | 509 std::make_pair(kInfiniteDuration, base::TimeDelta())); |
| 511 } else { | 510 } else { |
| 512 // Only discard part of the frame if it overlaps zero. | 511 // Only discard part of the frame if it overlaps zero. |
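
The partial-discard branch that follows this comment is elided above. The decision is: drop the whole packet when it ends before the zero timestamp, otherwise drop only the leading portion that precedes zero. A standalone sketch of that logic with plain std::chrono (illustrative, not the DecoderBuffer API):

```cpp
#include <chrono>
#include <utility>

using std::chrono::microseconds;

// Returns {front_discard, back_discard} for an audio packet that starts at
// |pts| and lasts |duration|, mirroring the negative-timestamp fix-up above.
std::pair<microseconds, microseconds> DiscardForNegativePts(
    microseconds pts, microseconds duration) {
  if (pts >= microseconds(0))
    return {microseconds(0), microseconds(0)};  // Nothing precedes zero.
  if (pts + duration < microseconds(0)) {
    // Entirely before zero: discard everything. (The real code uses the
    // kInfiniteDuration sentinel to mean "discard the whole packet".)
    return {microseconds::max(), microseconds(0)};
  }
  // Straddles zero: discard only the part before zero.
  return {-pts, microseconds(0)};
}
```
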
| (...skipping 144 matching lines...) |
| 657 | 656 |
| 658 void FFmpegDemuxerStream::ResetBitstreamConverter() { | 657 void FFmpegDemuxerStream::ResetBitstreamConverter() { |
| 659 #if defined(USE_PROPRIETARY_CODECS) | 658 #if defined(USE_PROPRIETARY_CODECS) |
| 660 if (bitstream_converter_) | 659 if (bitstream_converter_) |
| 661 InitBitstreamConverter(); | 660 InitBitstreamConverter(); |
| 662 #endif // defined(USE_PROPRIETARY_CODECS) | 661 #endif // defined(USE_PROPRIETARY_CODECS) |
| 663 } | 662 } |
| 664 | 663 |
| 665 void FFmpegDemuxerStream::InitBitstreamConverter() { | 664 void FFmpegDemuxerStream::InitBitstreamConverter() { |
| 666 #if defined(USE_PROPRIETARY_CODECS) | 665 #if defined(USE_PROPRIETARY_CODECS) |
| 667 switch (stream_->codec->codec_id) { | 666 switch (stream_->codecpar->codec_id) { |
| 668 case AV_CODEC_ID_H264: | 667 case AV_CODEC_ID_H264: |
| 669 // Clear |extra_data| so that future (fallback) decoders will know that | 668 // Clear |extra_data| so that future (fallback) decoders will know that |
| 670 // conversion is forcibly enabled on this stream. | 669 // conversion is forcibly enabled on this stream. |
| 671 // | 670 // |
| 672 // TODO(sandersd): Ideally we would convert |extra_data| to concatenated | 671 // TODO(sandersd): Ideally we would convert |extra_data| to concatenated |
| 673 // SPS/PPS data, but it's too late to be useful because Initialize() was | 672 // SPS/PPS data, but it's too late to be useful because Initialize() was |
| 674 // already called on GpuVideoDecoder, which is the only path that would | 673 // already called on GpuVideoDecoder, which is the only path that would |
| 675 // consume that data. | 674 // consume that data. |
| 676 if (video_config_) | 675 if (video_config_) |
| 677 video_config_->SetExtraData(std::vector<uint8_t>()); | 676 video_config_->SetExtraData(std::vector<uint8_t>()); |
| 678 bitstream_converter_.reset( | 677 bitstream_converter_.reset( |
| 679 new FFmpegH264ToAnnexBBitstreamConverter(stream_->codec)); | 678 new FFmpegH264ToAnnexBBitstreamConverter(stream_->codecpar)); |
| 680 break; | 679 break; |
| 681 #if BUILDFLAG(ENABLE_HEVC_DEMUXING) | 680 #if BUILDFLAG(ENABLE_HEVC_DEMUXING) |
| 682 case AV_CODEC_ID_HEVC: | 681 case AV_CODEC_ID_HEVC: |
| 683 bitstream_converter_.reset( | 682 bitstream_converter_.reset( |
| 684 new FFmpegH265ToAnnexBBitstreamConverter(stream_->codec)); | 683 new FFmpegH265ToAnnexBBitstreamConverter(stream_->codecpar)); |
| 685 break; | 684 break; |
| 686 #endif | 685 #endif |
| 687 case AV_CODEC_ID_AAC: | 686 case AV_CODEC_ID_AAC: |
| 688 bitstream_converter_.reset( | 687 bitstream_converter_.reset( |
| 689 new FFmpegAACBitstreamConverter(stream_->codec)); | 688 new FFmpegAACBitstreamConverter(stream_->codecpar)); |
| 690 break; | 689 break; |
| 691 default: | 690 default: |
| 692 break; | 691 break; |
| 693 } | 692 } |
| 694 #endif // defined(USE_PROPRIETARY_CODECS) | 693 #endif // defined(USE_PROPRIETARY_CODECS) |
| 695 } | 694 } |
| 696 | 695 |
| 697 bool FFmpegDemuxerStream::SupportsConfigChanges() { return false; } | 696 bool FFmpegDemuxerStream::SupportsConfigChanges() { return false; } |
| 698 | 697 |
| 699 AudioDecoderConfig FFmpegDemuxerStream::audio_decoder_config() { | 698 AudioDecoderConfig FFmpegDemuxerStream::audio_decoder_config() { |
| (...skipping 404 matching lines...) |
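
The InitBitstreamConverter() change above hands `AVCodecParameters` to the H.264/HEVC/AAC converters; the codec extradata those converters consume lives at `codecpar->extradata`. For H.264, whether Annex B conversion is needed can be inferred from that extradata: MP4-style avcC configuration records start with a configurationVersion byte of 1, while Annex B data starts with a 00 00 00 01 start code. A hedged standalone check (a common heuristic, not the converter's actual logic):

```cpp
extern "C" {
#include <libavformat/avformat.h>
}

// Heuristic: true if H.264 extradata is an avcC configuration record
// (length-prefixed NALs, parameter sets out of band), which a decoder that
// expects Annex B start codes cannot consume without conversion.
bool NeedsAnnexBConversion(const AVStream* stream) {
  const AVCodecParameters* parameters = stream->codecpar;
  if (parameters->codec_id != AV_CODEC_ID_H264)
    return false;
  // avcC extradata begins with configurationVersion == 1; Annex B extradata
  // (or no extradata at all) does not.
  return parameters->extradata_size >= 7 && parameters->extradata[0] == 1;
}
```
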
| 1104 static int CalculateBitrate(AVFormatContext* format_context, | 1103 static int CalculateBitrate(AVFormatContext* format_context, |
| 1105 const base::TimeDelta& duration, | 1104 const base::TimeDelta& duration, |
| 1106 int64_t filesize_in_bytes) { | 1105 int64_t filesize_in_bytes) { |
| 1107 // If there is a bitrate set on the container, use it. | 1106 // If there is a bitrate set on the container, use it. |
| 1108 if (format_context->bit_rate > 0) | 1107 if (format_context->bit_rate > 0) |
| 1109 return format_context->bit_rate; | 1108 return format_context->bit_rate; |
| 1110 | 1109 |
| 1111 // Then try to sum the bitrates individually per stream. | 1110 // Then try to sum the bitrates individually per stream. |
| 1112 int bitrate = 0; | 1111 int bitrate = 0; |
| 1113 for (size_t i = 0; i < format_context->nb_streams; ++i) { | 1112 for (size_t i = 0; i < format_context->nb_streams; ++i) { |
| 1114 AVCodecContext* codec_context = format_context->streams[i]->codec; | 1113 AVCodecParameters* codec_parameters = format_context->streams[i]->codecpar; |
| 1115 bitrate += codec_context->bit_rate; | 1114 bitrate += codec_parameters->bit_rate; |
| 1116 } | 1115 } |
| 1117 if (bitrate > 0) | 1116 if (bitrate > 0) |
| 1118 return bitrate; | 1117 return bitrate; |
| 1119 | 1118 |
| 1120 // See if we can approximate the bitrate as long as we have a filesize and | 1119 // See if we can approximate the bitrate as long as we have a filesize and |
| 1121 // valid duration. | 1120 // valid duration. |
| 1122 if (duration.InMicroseconds() <= 0 || duration == kInfiniteDuration || | 1121 if (duration.InMicroseconds() <= 0 || duration == kInfiniteDuration || |
| 1123 filesize_in_bytes == 0) { | 1122 filesize_in_bytes == 0) { |
| 1124 return 0; | 1123 return 0; |
| 1125 } | 1124 } |
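
The remainder of CalculateBitrate(), elided above, approximates the bitrate from file size and duration when neither the container nor the summed `codecpar->bit_rate` values produce one. The arithmetic is just bits divided by seconds; a hedged sketch:

```cpp
#include <cstdint>

// Approximate the average bitrate (bits per second) from total file size and
// duration. This mirrors the fallback path of CalculateBitrate(); the exact
// rounding in the real code may differ.
int EstimateBitrate(int64_t filesize_in_bytes,
                    int64_t duration_in_microseconds) {
  if (filesize_in_bytes <= 0 || duration_in_microseconds <= 0)
    return 0;
  const double duration_in_seconds = duration_in_microseconds / 1000000.0;
  return static_cast<int>(filesize_in_bytes * 8 / duration_in_seconds);
}

// Example: a 10 MB file lasting 60 seconds works out to roughly 1.3 Mbps:
// 10'000'000 bytes * 8 / 60 s = ~1'333'333 bits per second.
```
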
| (...skipping 90 matching lines...) |
| 1216 | 1215 |
| 1217 // If available, |start_time_| will be set to the lowest stream start time. | 1216 // If available, |start_time_| will be set to the lowest stream start time. |
| 1218 start_time_ = kInfiniteDuration; | 1217 start_time_ = kInfiniteDuration; |
| 1219 | 1218 |
| 1220 base::TimeDelta max_duration; | 1219 base::TimeDelta max_duration; |
| 1221 int detected_audio_track_count = 0; | 1220 int detected_audio_track_count = 0; |
| 1222 int detected_video_track_count = 0; | 1221 int detected_video_track_count = 0; |
| 1223 int detected_text_track_count = 0; | 1222 int detected_text_track_count = 0; |
| 1224 for (size_t i = 0; i < format_context->nb_streams; ++i) { | 1223 for (size_t i = 0; i < format_context->nb_streams; ++i) { |
| 1225 AVStream* stream = format_context->streams[i]; | 1224 AVStream* stream = format_context->streams[i]; |
| 1226 const AVCodecContext* codec_context = stream->codec; | 1225 const AVCodecParameters* codec_parameters = stream->codecpar; |
| 1227 const AVMediaType codec_type = codec_context->codec_type; | 1226 const AVMediaType codec_type = codec_parameters->codec_type; |
| 1227 const AVCodecID codec_id = codec_parameters->codec_id; |
| 1228 | 1228 |
| 1229 if (codec_type == AVMEDIA_TYPE_AUDIO) { | 1229 if (codec_type == AVMEDIA_TYPE_AUDIO) { |
| 1230 // Log the codec detected, whether it is supported or not, and whether or | 1230 // Log the codec detected, whether it is supported or not, and whether or |
| 1231 // not we have already detected a supported codec in another stream. | 1231 // not we have already detected a supported codec in another stream. |
| 1232 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodecHash", | 1232 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodecHash", |
| 1233 HashCodecName(GetCodecName(codec_context))); | 1233 HashCodecName(GetCodecName(codec_id))); |
| 1234 detected_audio_track_count++; | 1234 detected_audio_track_count++; |
| 1235 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { | 1235 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { |
| 1236 // Log the codec detected, whether it is supported or not, and whether or | 1236 // Log the codec detected, whether it is supported or not, and whether or |
| 1237 // not we have already detected a supported codec in another stream. | 1237 // not we have already detected a supported codec in another stream. |
| 1238 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodecHash", | 1238 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodecHash", |
| 1239 HashCodecName(GetCodecName(codec_context))); | 1239 HashCodecName(GetCodecName(codec_id))); |
| 1240 detected_video_track_count++; | 1240 detected_video_track_count++; |
| 1241 | 1241 |
| 1242 #if BUILDFLAG(ENABLE_HEVC_DEMUXING) | 1242 #if BUILDFLAG(ENABLE_HEVC_DEMUXING) |
| 1243 if (stream->codec->codec_id == AV_CODEC_ID_HEVC) { | 1243 if (codec_id == AV_CODEC_ID_HEVC) { |
| 1244 // If ffmpeg is built without HEVC parser/decoder support, it will be | 1244 // If ffmpeg is built without HEVC parser/decoder support, it will be |
| 1245 // able to demux HEVC based solely on container-provided information, | 1245 // able to demux HEVC based solely on container-provided information, |
| 1246 // but unable to get some of the parameters without parsing the stream | 1246 // but unable to get some of the parameters without parsing the stream |
| 1247 // (e.g. coded size needs to be read from SPS, pixel format is typically | 1247 // (e.g. coded size needs to be read from SPS, pixel format is typically |
| 1248 // deduced from decoder config in hvcC box). These are not really needed | 1248 // deduced from decoder config in hvcC box). These are not really needed |
| 1249 // when using external decoder (e.g. hardware decoder), so override them | 1249 // when using external decoder (e.g. hardware decoder), so override them |
| 1250 // here, to make sure this translates into a valid VideoDecoderConfig. | 1250 // to make sure this translates into a valid VideoDecoderConfig. Coded |
| 1251 if (stream->codec->coded_width == 0 && | 1251 // size is overridden in AVStreamToVideoDecoderConfig(). |
| 1252 stream->codec->coded_height == 0) { | 1252 if (stream->codecpar->format == AV_PIX_FMT_NONE) |
| 1253 DCHECK(stream->codec->width > 0); | 1253 stream->codecpar->format = AV_PIX_FMT_YUV420P; |
| 1254 DCHECK(stream->codec->height > 0); | |
| 1255 stream->codec->coded_width = stream->codec->width; | |
| 1256 stream->codec->coded_height = stream->codec->height; | |
| 1257 } | |
| 1258 if (stream->codec->pix_fmt == AV_PIX_FMT_NONE) { | |
| 1259 stream->codec->pix_fmt = AV_PIX_FMT_YUV420P; | |
| 1260 } | |
| 1261 } | 1254 } |
| 1262 #endif | 1255 #endif |
| 1263 } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) { | 1256 } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) { |
| 1264 detected_text_track_count++; | 1257 detected_text_track_count++; |
| 1265 if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) { | 1258 if (codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) { |
| 1266 continue; | 1259 continue; |
| 1267 } | 1260 } |
| 1268 } else { | 1261 } else { |
| 1269 continue; | 1262 continue; |
| 1270 } | 1263 } |
| 1271 | 1264 |
| 1272 // Attempt to create a FFmpegDemuxerStream from the AVStream. This will | 1265 // Attempt to create a FFmpegDemuxerStream from the AVStream. This will |
| 1273 // return nullptr if the AVStream is invalid. Validity checks will verify | 1266 // return nullptr if the AVStream is invalid. Validity checks will verify |
| 1274 // things like: codec, channel layout, sample/pixel format, etc... | 1267 // things like: codec, channel layout, sample/pixel format, etc... |
| 1275 std::unique_ptr<FFmpegDemuxerStream> demuxer_stream = | 1268 std::unique_ptr<FFmpegDemuxerStream> demuxer_stream = |
| 1276 FFmpegDemuxerStream::Create(this, stream, media_log_); | 1269 FFmpegDemuxerStream::Create(this, stream, media_log_); |
| 1277 if (demuxer_stream.get()) { | 1270 if (demuxer_stream.get()) { |
| 1278 streams_[i] = std::move(demuxer_stream); | 1271 streams_[i] = std::move(demuxer_stream); |
| 1279 } else { | 1272 } else { |
| 1280 if (codec_type == AVMEDIA_TYPE_AUDIO) { | 1273 if (codec_type == AVMEDIA_TYPE_AUDIO) { |
| 1281 MEDIA_LOG(INFO, media_log_) | 1274 MEDIA_LOG(INFO, media_log_) |
| 1282 << GetDisplayName() | 1275 << GetDisplayName() |
| 1283 << ": skipping invalid or unsupported audio track"; | 1276 << ": skipping invalid or unsupported audio track"; |
| 1284 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { | 1277 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { |
| 1285 MEDIA_LOG(INFO, media_log_) | 1278 MEDIA_LOG(INFO, media_log_) |
| 1286 << GetDisplayName() | 1279 << GetDisplayName() |
| 1287 << ": skipping invalid or unsupported video track"; | 1280 << ": skipping invalid or unsupported video track"; |
| 1288 } | 1281 } |
| 1289 | 1282 |
| 1290 // This AVStream does not successfully convert. | 1283 // This AVStream does not successfully convert. |
| 1291 continue; | 1284 continue; |
| 1292 } | 1285 } |
| 1293 | 1286 |
| 1294 StreamParser::TrackId track_id = stream->id; | 1287 StreamParser::TrackId track_id = stream->id; |
| 1295 | |
| 1296 if ((codec_type == AVMEDIA_TYPE_AUDIO && | |
| 1297 media_tracks->getAudioConfig(track_id).IsValidConfig()) || | |
| 1298 (codec_type == AVMEDIA_TYPE_VIDEO && | |
| 1299 media_tracks->getVideoConfig(track_id).IsValidConfig())) { | |
| 1300 MEDIA_LOG(INFO, media_log_) | |
| 1301 << GetDisplayName() | |
| 1302 << ": skipping duplicate media stream id=" << track_id; | |
| 1303 continue; | |
| 1304 } | |
| 1305 | |
| 1306 std::string track_label = streams_[i]->GetMetadata("handler_name"); | 1288 std::string track_label = streams_[i]->GetMetadata("handler_name"); |
| 1307 std::string track_language = streams_[i]->GetMetadata("language"); | 1289 std::string track_language = streams_[i]->GetMetadata("language"); |
| 1308 | 1290 |
| 1309 // Some metadata is named differently in FFmpeg for webm files. | 1291 // Some metadata is named differently in FFmpeg for webm files. |
| 1310 if (strstr(format_context->iformat->name, "webm") || | 1292 if (strstr(format_context->iformat->name, "webm") || |
| 1311 strstr(format_context->iformat->name, "matroska")) { | 1293 strstr(format_context->iformat->name, "matroska")) { |
| 1312 // TODO(servolk): FFmpeg doesn't set stream->id correctly for webm files. | 1294 // TODO(servolk): FFmpeg doesn't set stream->id correctly for webm files. |
| 1313 // Need to fix that and use it as track id. crbug.com/323183 | 1295 // Need to fix that and use it as track id. crbug.com/323183 |
| 1314 track_id = | 1296 track_id = |
| 1315 static_cast<StreamParser::TrackId>(media_tracks->tracks().size() + 1); | 1297 static_cast<StreamParser::TrackId>(media_tracks->tracks().size() + 1); |
| 1316 track_label = streams_[i]->GetMetadata("title"); | 1298 track_label = streams_[i]->GetMetadata("title"); |
| 1317 } | 1299 } |
| 1318 | 1300 |
| 1301 if ((codec_type == AVMEDIA_TYPE_AUDIO && |
| 1302 media_tracks->getAudioConfig(track_id).IsValidConfig()) || |
| 1303 (codec_type == AVMEDIA_TYPE_VIDEO && |
| 1304 media_tracks->getVideoConfig(track_id).IsValidConfig())) { |
| 1305 MEDIA_LOG(INFO, media_log_) |
| 1306 << GetDisplayName() |
| 1307 << ": skipping duplicate media stream id=" << track_id; |
| 1308 continue; |
| 1309 } |
| 1310 |
| 1319 // Note when we find our audio/video stream (we only want one of each) and | 1311 // Note when we find our audio/video stream (we only want one of each) and |
| 1320 // record src= playback UMA stats for the stream's decoder config. | 1312 // record src= playback UMA stats for the stream's decoder config. |
| 1321 MediaTrack* media_track = nullptr; | 1313 MediaTrack* media_track = nullptr; |
| 1322 if (codec_type == AVMEDIA_TYPE_AUDIO) { | 1314 if (codec_type == AVMEDIA_TYPE_AUDIO) { |
| 1323 AudioDecoderConfig audio_config = streams_[i]->audio_decoder_config(); | 1315 AudioDecoderConfig audio_config = streams_[i]->audio_decoder_config(); |
| 1324 RecordAudioCodecStats(audio_config); | 1316 RecordAudioCodecStats(audio_config); |
| 1325 | 1317 |
| 1326 media_track = media_tracks->AddAudioTrack(audio_config, track_id, "main", | 1318 media_track = media_tracks->AddAudioTrack(audio_config, track_id, "main", |
| 1327 track_label, track_language); | 1319 track_label, track_language); |
| 1328 media_track->set_id(base::UintToString(track_id)); | 1320 media_track->set_id(base::UintToString(track_id)); |
| 1329 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) == | 1321 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) == |
| 1330 track_id_to_demux_stream_map_.end()); | 1322 track_id_to_demux_stream_map_.end()); |
| 1331 track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get(); | 1323 track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get(); |
| 1332 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { | 1324 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { |
| 1333 VideoDecoderConfig video_config = streams_[i]->video_decoder_config(); | 1325 VideoDecoderConfig video_config = streams_[i]->video_decoder_config(); |
| 1334 | 1326 |
| 1335 RecordVideoCodecStats(video_config, stream->codec->color_range, | 1327 RecordVideoCodecStats(video_config, stream->codecpar->color_range, |
| 1336 media_log_.get()); | 1328 media_log_.get()); |
| 1337 | 1329 |
| 1338 media_track = media_tracks->AddVideoTrack(video_config, track_id, "main", | 1330 media_track = media_tracks->AddVideoTrack(video_config, track_id, "main", |
| 1339 track_label, track_language); | 1331 track_label, track_language); |
| 1340 media_track->set_id(base::UintToString(track_id)); | 1332 media_track->set_id(base::UintToString(track_id)); |
| 1341 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) == | 1333 DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) == |
| 1342 track_id_to_demux_stream_map_.end()); | 1334 track_id_to_demux_stream_map_.end()); |
| 1343 track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get(); | 1335 track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get(); |
| 1344 } | 1336 } |
| 1345 | 1337 |
| (...skipping 52 matching lines...) |
| 1398 // http://xiph.org/vorbis/doc/Vorbis_I_spec.html | 1390 // http://xiph.org/vorbis/doc/Vorbis_I_spec.html |
| 1399 // | 1391 // |
| 1400 // FFmpeg's use of negative timestamps for opus pre-skip is nonstandard, but | 1392 // FFmpeg's use of negative timestamps for opus pre-skip is nonstandard, but |
| 1401 // for more information on pre-skip see section 4.2 of the Ogg Opus spec: | 1393 // for more information on pre-skip see section 4.2 of the Ogg Opus spec: |
| 1402 // https://tools.ietf.org/html/draft-ietf-codec-oggopus-08#section-4.2 | 1394 // https://tools.ietf.org/html/draft-ietf-codec-oggopus-08#section-4.2 |
| 1403 for (const auto& stream : streams_) { | 1395 for (const auto& stream : streams_) { |
| 1404 if (!stream || stream->type() != DemuxerStream::AUDIO) | 1396 if (!stream || stream->type() != DemuxerStream::AUDIO) |
| 1405 continue; | 1397 continue; |
| 1406 const AVStream* audio_stream = stream->av_stream(); | 1398 const AVStream* audio_stream = stream->av_stream(); |
| 1407 DCHECK(audio_stream); | 1399 DCHECK(audio_stream); |
| 1408 if (audio_stream->codec->codec_id == AV_CODEC_ID_OPUS || | 1400 if (audio_stream->codecpar->codec_id == AV_CODEC_ID_OPUS || |
| 1409 (strcmp(format_context->iformat->name, "ogg") == 0 && | 1401 (strcmp(format_context->iformat->name, "ogg") == 0 && |
| 1410 audio_stream->codec->codec_id == AV_CODEC_ID_VORBIS)) { | 1402 audio_stream->codecpar->codec_id == AV_CODEC_ID_VORBIS)) { |
| 1411 for (size_t i = 0; i < streams_.size(); ++i) { | 1403 for (size_t i = 0; i < streams_.size(); ++i) { |
| 1412 if (!streams_[i]) | 1404 if (!streams_[i]) |
| 1413 continue; | 1405 continue; |
| 1414 streams_[i]->enable_negative_timestamp_fixups(); | 1406 streams_[i]->enable_negative_timestamp_fixups(); |
| 1415 | 1407 |
| 1416 // Fixup the seeking information to avoid selecting the audio stream | 1408 // Fixup the seeking information to avoid selecting the audio stream |
| 1417 // simply because it has a lower starting time. | 1409 // simply because it has a lower starting time. |
| 1418 if (streams_[i]->av_stream() == audio_stream && | 1410 if (streams_[i]->av_stream() == audio_stream && |
| 1419 streams_[i]->start_time() < base::TimeDelta()) { | 1411 streams_[i]->start_time() < base::TimeDelta()) { |
| 1420 streams_[i]->set_start_time(base::TimeDelta()); | 1412 streams_[i]->set_start_time(base::TimeDelta()); |
| (...skipping 59 matching lines...) |
| 1480 int video_track_count = 0; | 1472 int video_track_count = 0; |
| 1481 for (size_t i = 0; i < streams_.size(); ++i) { | 1473 for (size_t i = 0; i < streams_.size(); ++i) { |
| 1482 FFmpegDemuxerStream* stream = streams_[i].get(); | 1474 FFmpegDemuxerStream* stream = streams_[i].get(); |
| 1483 if (!stream) | 1475 if (!stream) |
| 1484 continue; | 1476 continue; |
| 1485 if (stream->type() == DemuxerStream::AUDIO) { | 1477 if (stream->type() == DemuxerStream::AUDIO) { |
| 1486 ++audio_track_count; | 1478 ++audio_track_count; |
| 1487 std::string suffix = ""; | 1479 std::string suffix = ""; |
| 1488 if (audio_track_count > 1) | 1480 if (audio_track_count > 1) |
| 1489 suffix = "_track" + base::IntToString(audio_track_count); | 1481 suffix = "_track" + base::IntToString(audio_track_count); |
| 1490 const AVCodecContext* audio_codec = avctx->streams[i]->codec; | 1482 const AVCodecParameters* audio_parameters = avctx->streams[i]->codecpar; |
| 1491 const AudioDecoderConfig& audio_config = stream->audio_decoder_config(); | 1483 const AudioDecoderConfig& audio_config = stream->audio_decoder_config(); |
| 1492 params.SetString("audio_codec_name" + suffix, GetCodecName(audio_codec)); | 1484 params.SetString("audio_codec_name" + suffix, |
| 1493 params.SetInteger("audio_channels_count" + suffix, audio_codec->channels); | 1485 GetCodecName(audio_parameters->codec_id)); |
| 1486 params.SetInteger("audio_channels_count" + suffix, |
| 1487 audio_parameters->channels); |
| 1494 params.SetString("audio_sample_format" + suffix, | 1488 params.SetString("audio_sample_format" + suffix, |
| 1495 SampleFormatToString(audio_config.sample_format())); | 1489 SampleFormatToString(audio_config.sample_format())); |
| 1496 params.SetInteger("audio_samples_per_second" + suffix, | 1490 params.SetInteger("audio_samples_per_second" + suffix, |
| 1497 audio_config.samples_per_second()); | 1491 audio_config.samples_per_second()); |
| 1498 } else if (stream->type() == DemuxerStream::VIDEO) { | 1492 } else if (stream->type() == DemuxerStream::VIDEO) { |
| 1499 ++video_track_count; | 1493 ++video_track_count; |
| 1500 std::string suffix = ""; | 1494 std::string suffix = ""; |
| 1501 if (video_track_count > 1) | 1495 if (video_track_count > 1) |
| 1502 suffix = "_track" + base::IntToString(video_track_count); | 1496 suffix = "_track" + base::IntToString(video_track_count); |
| 1503 const AVCodecContext* video_codec = avctx->streams[i]->codec; | 1497 const AVStream* video_av_stream = avctx->streams[i]; |
| 1498 const AVCodecParameters* video_parameters = video_av_stream->codecpar; |
| 1504 const VideoDecoderConfig& video_config = stream->video_decoder_config(); | 1499 const VideoDecoderConfig& video_config = stream->video_decoder_config(); |
| 1505 params.SetString("video_codec_name" + suffix, GetCodecName(video_codec)); | 1500 params.SetString("video_codec_name" + suffix, |
| 1506 params.SetInteger("width" + suffix, video_codec->width); | 1501 GetCodecName(video_parameters->codec_id)); |
| 1507 params.SetInteger("height" + suffix, video_codec->height); | 1502 params.SetInteger("width" + suffix, video_parameters->width); |
| 1508 params.SetString("time_base" + suffix, | 1503 params.SetInteger("height" + suffix, video_parameters->height); |
| 1509 base::StringPrintf("%d/%d", video_codec->time_base.num, | 1504 |
| 1510 video_codec->time_base.den)); | 1505 // AVCodecParameters has no time_base field. We use the one from AVStream |
| 1506 // here. |
| 1507 params.SetString( |
| 1508 "time_base" + suffix, |
| 1509 base::StringPrintf("%d/%d", video_av_stream->time_base.num, |
| 1510 video_av_stream->time_base.den)); |
| 1511 |
| 1511 params.SetString("video_format" + suffix, | 1512 params.SetString("video_format" + suffix, |
| 1512 VideoPixelFormatToString(video_config.format())); | 1513 VideoPixelFormatToString(video_config.format())); |
| 1513 params.SetBoolean("video_is_encrypted" + suffix, | 1514 params.SetBoolean("video_is_encrypted" + suffix, |
| 1514 video_config.is_encrypted()); | 1515 video_config.is_encrypted()); |
| 1515 } | 1516 } |
| 1516 } | 1517 } |
| 1517 params.SetBoolean("found_audio_stream", (audio_track_count > 0)); | 1518 params.SetBoolean("found_audio_stream", (audio_track_count > 0)); |
| 1518 params.SetBoolean("found_video_stream", (video_track_count > 0)); | 1519 params.SetBoolean("found_video_stream", (video_track_count > 0)); |
| 1519 SetTimeProperty(metadata_event.get(), "max_duration", max_duration); | 1520 SetTimeProperty(metadata_event.get(), "max_duration", max_duration); |
| 1520 SetTimeProperty(metadata_event.get(), "start_time", start_time_); | 1521 SetTimeProperty(metadata_event.get(), "start_time", start_time_); |
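
As the new comment above notes, `AVCodecParameters` has no `time_base`, so the logging now reads it from the `AVStream`. The stream time base is also what converts packet timestamps into seconds; a minimal sketch (illustrative names):

```cpp
extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/rational.h>
}

// Convert a packet's pts, expressed in units of the stream's time_base, to
// seconds. Returns a negative value if the packet carries no pts.
double PacketPtsInSeconds(const AVStream* stream, const AVPacket* packet) {
  if (packet->pts == AV_NOPTS_VALUE)
    return -1.0;
  return packet->pts * av_q2d(stream->time_base);
}
```
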
| (...skipping 261 matching lines...) |
| 1782 | 1783 |
| 1783 void FFmpegDemuxer::SetLiveness(DemuxerStream::Liveness liveness) { | 1784 void FFmpegDemuxer::SetLiveness(DemuxerStream::Liveness liveness) { |
| 1784 DCHECK(task_runner_->BelongsToCurrentThread()); | 1785 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 1785 for (const auto& stream : streams_) { | 1786 for (const auto& stream : streams_) { |
| 1786 if (stream) | 1787 if (stream) |
| 1787 stream->SetLiveness(liveness); | 1788 stream->SetLiveness(liveness); |
| 1788 } | 1789 } |
| 1789 } | 1790 } |
| 1790 | 1791 |
| 1791 } // namespace media | 1792 } // namespace media |