// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>
#include <limits>
#include <queue>
#include <string>
#include <vector>

#include "base/base_paths.h"
#include "base/basictypes.h"
#include "base/file_path.h"
#include "base/file_util.h"
#include "base/logging.h"
#include "base/path_service.h"
#include "base/perftimer.h"
#include "base/scoped_ptr.h"
#include "base/string_util.h"
#include "base/test/perf_test_suite.h"
#include "media/base/media.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/ffmpeg/ffmpeg_util.h"
#include "media/ffmpeg/file_protocol.h"
#include "testing/gtest/include/gtest/gtest.h"

int main(int argc, char** argv) {
  return PerfTestSuite(argc, argv).Run();
}

namespace media {

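// Simple FIFO that owns the AVPackets pushed into it.  push() duplicates the
// packet's payload so it stays valid after the demuxer reuses its buffer;
// pop() releases the payload and deletes the packet.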
class AVPacketQueue {
 public:
  AVPacketQueue() {
  }

  ~AVPacketQueue() {
    flush();
  }

  bool empty() {
    return packets_.empty();
  }

  AVPacket* peek() {
    return packets_.front();
  }

  void pop() {
    AVPacket* packet = packets_.front();
    packets_.pop();
    av_free_packet(packet);
    delete packet;
  }

  void push(AVPacket* packet) {
    av_dup_packet(packet);
    packets_.push(packet);
  }

  void flush() {
    while (!empty()) {
      pop();
    }
  }

 private:
  std::queue<AVPacket*> packets_;

  DISALLOW_COPY_AND_ASSIGN(AVPacketQueue);
};

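// Parameterized fixture that drives FFmpeg directly: it opens the media file
// named by the test parameter, demuxes packets into per-stream queues, and
// decodes audio/video one frame at a time while tracking decoded timestamps
// and durations in microseconds.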
class FFmpegTest : public testing::TestWithParam<const char*> {
 protected:
  FFmpegTest()
      : av_format_context_(NULL),
        audio_stream_index_(-1),
        video_stream_index_(-1),
        audio_buffer_(NULL),
        video_buffer_(NULL),
        decoded_audio_time_(AV_NOPTS_VALUE),
        decoded_audio_duration_(AV_NOPTS_VALUE),
        decoded_video_time_(AV_NOPTS_VALUE),
        decoded_video_duration_(AV_NOPTS_VALUE),
        duration_(AV_NOPTS_VALUE) {
    InitializeFFmpeg();

    audio_buffer_.reset(
        reinterpret_cast<int16*>(av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE)));
    video_buffer_.reset(avcodec_alloc_frame());
  }

  virtual ~FFmpegTest() {
  }

  void OpenAndReadFile(const std::string& name) {
    OpenFile(name);
    OpenCodecs();
    ReadRemainingFile();
  }

  void OpenFile(const std::string& name) {
    FilePath path;
    PathService::Get(base::DIR_SOURCE_ROOT, &path);
    path = path.AppendASCII("media")
               .AppendASCII("test")
               .AppendASCII("data")
               .AppendASCII("content")
               .AppendASCII(name.c_str());
    FilePath::StringType raw_path = path.value();
    EXPECT_TRUE(file_util::PathExists(path));

    // FFmpeg expects an ASCII path; FilePath is wide on Windows.
#if defined(OS_WIN)
    std::string ascii_path = string_util::WideToASCII(raw_path);
#else
    std::string ascii_path = raw_path;
#endif

    EXPECT_EQ(0, av_open_input_file(&av_format_context_,
                                    ascii_path.c_str(),
                                    NULL, 0, NULL))
        << "Could not open " << path.value();
    EXPECT_LE(0, av_find_stream_info(av_format_context_))
        << "Could not find stream information for " << path.value();

    // Determine duration by picking max stream duration.
    for (unsigned int i = 0; i < av_format_context_->nb_streams; ++i) {
      AVStream* av_stream = av_format_context_->streams[i];
      int64 duration = ConvertTimestamp(av_stream->time_base,
                                        av_stream->duration).InMicroseconds();
      duration_ = std::max(duration_, duration);
    }

    // Final check to see if the container itself specifies a duration.
    AVRational av_time_base = {1, AV_TIME_BASE};
    int64 duration =
        ConvertTimestamp(av_time_base,
                         av_format_context_->duration).InMicroseconds();
    duration_ = std::max(duration_, duration);
  }

  void CloseFile() {
    av_close_input_file(av_format_context_);
  }

  void OpenCodecs() {
    for (unsigned int i = 0; i < av_format_context_->nb_streams; ++i) {
      AVStream* av_stream = av_format_context_->streams[i];
      AVCodecContext* av_codec_context = av_stream->codec;
      AVCodec* av_codec = avcodec_find_decoder(av_codec_context->codec_id);

      EXPECT_TRUE(av_codec)
          << "Could not find AVCodec with CodecID "
          << av_codec_context->codec_id;
      EXPECT_EQ(0, avcodec_open(av_codec_context, av_codec))
          << "Could not open AVCodecContext with CodecID "
          << av_codec_context->codec_id;

      if (av_codec->type == CODEC_TYPE_AUDIO) {
        EXPECT_EQ(-1, audio_stream_index_) << "Found multiple audio streams.";
        audio_stream_index_ = static_cast<int>(i);
      } else if (av_codec->type == CODEC_TYPE_VIDEO) {
        EXPECT_EQ(-1, video_stream_index_) << "Found multiple video streams.";
        video_stream_index_ = static_cast<int>(i);
      } else {
        ADD_FAILURE() << "Found unknown stream type.";
      }
    }
  }

  void CloseCodecs() {
    for (unsigned int i = 0; i < av_format_context_->nb_streams; ++i) {
      AVStream* av_stream = av_format_context_->streams[i];
      av_stream->discard = AVDISCARD_ALL;
      avcodec_close(av_stream->codec);
    }
  }

  void Flush() {
    if (has_audio()) {
      audio_packets_.flush();
      avcodec_flush_buffers(av_audio_context());
    }
    if (has_video()) {
      video_packets_.flush();
      avcodec_flush_buffers(av_video_context());
    }
  }

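  // Demuxes packets into the audio/video queues until a packet with a
  // timestamp past |time| (in microseconds) has been queued, or until
  // av_read_frame() reports end of file.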
  void ReadUntil(int64 time) {
    while (true) {
      scoped_ptr<AVPacket> packet(new AVPacket());
      if (av_read_frame(av_format_context_, packet.get()) < 0) {
        break;
      }

      int stream_index = static_cast<int>(packet->stream_index);
      int64 packet_time = AV_NOPTS_VALUE;
      if (stream_index == audio_stream_index_) {
        packet_time =
            ConvertTimestamp(av_audio_stream()->time_base, packet->pts)
                .InMicroseconds();
        audio_packets_.push(packet.release());
      } else if (stream_index == video_stream_index_) {
        packet_time =
            ConvertTimestamp(av_video_stream()->time_base, packet->pts)
                .InMicroseconds();
        video_packets_.push(packet.release());
      } else {
        ADD_FAILURE() << "Found packet that belongs to unknown stream.";
      }

      if (packet_time > time) {
        break;
      }
    }
  }

  void ReadRemainingFile() {
    ReadUntil(std::numeric_limits<int64>::max());
  }

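  // Decodes a single audio frame from the queued packets.  Returns true when a
  // frame was produced and updates decoded_audio_time()/duration(); returns
  // false on end of stream or decode error.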
  bool StepDecodeAudio() {
    EXPECT_TRUE(has_audio());
    if (!has_audio() || audio_packets_.empty()) {
      return false;
    }

    // Decode until output is produced, end of stream, or error.
    while (true) {
      int result = 0;
      int size_out = AVCODEC_MAX_AUDIO_FRAME_SIZE;
      bool end_of_stream = false;

      AVPacket packet;
      if (audio_packets_.empty()) {
        // An empty packet (NULL data, zero size) tells the decoder to flush
        // any buffered frames.
        av_init_packet(&packet);
        packet.data = NULL;
        packet.size = 0;
        end_of_stream = true;
      } else {
        memcpy(&packet, audio_packets_.peek(), sizeof(packet));
      }

      result = avcodec_decode_audio3(av_audio_context(), audio_buffer_.get(),
                                     &size_out, &packet);
      if (!audio_packets_.empty()) {
        audio_packets_.pop();
      }

      EXPECT_GE(result, 0) << "Audio decode error.";
      if (result < 0 || (size_out == 0 && end_of_stream)) {
        return false;
      }

      if (result > 0) {
        // TODO(scherkus): move this to ffmpeg_util.h and dedup.
        int64 denominator = av_audio_context()->channels *
            av_get_bits_per_sample_format(av_audio_context()->sample_fmt) / 8 *
            av_audio_context()->sample_rate;
        double microseconds = size_out /
            (denominator /
             static_cast<double>(base::Time::kMicrosecondsPerSecond));
        decoded_audio_duration_ = static_cast<int64>(microseconds);

        if (packet.pts == static_cast<int64>(AV_NOPTS_VALUE)) {
          EXPECT_NE(decoded_audio_time_, static_cast<int64>(AV_NOPTS_VALUE))
              << "We never received an initial timestamped audio packet! "
              << "Looks like there's a seeking/parsing bug in FFmpeg.";
          decoded_audio_time_ += decoded_audio_duration_;
        } else {
          decoded_audio_time_ =
              ConvertTimestamp(av_audio_stream()->time_base, packet.pts)
                  .InMicroseconds();
        }
        return true;
      }
    }
    return true;
  }

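  // Decodes a single video frame from the queued packets.  Returns true when a
  // picture was produced and updates decoded_video_time()/duration(); returns
  // false on end of stream or decode error.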
  bool StepDecodeVideo() {
    EXPECT_TRUE(has_video());
    if (!has_video() || video_packets_.empty()) {
      return false;
    }

    // Decode until output is produced, end of stream, or error.
    while (true) {
      int result = 0;
      int got_picture = 0;
      bool end_of_stream = false;

      AVPacket packet;
      if (video_packets_.empty()) {
        // An empty packet (NULL data, zero size) tells the decoder to flush
        // any buffered frames.
        av_init_packet(&packet);
        packet.data = NULL;
        packet.size = 0;
        end_of_stream = true;
      } else {
        memcpy(&packet, video_packets_.peek(), sizeof(packet));
      }

      // Route the packet's pts through reordered_opaque so we can recover the
      // presentation timestamp of the frame that actually comes out.
      av_video_context()->reordered_opaque = packet.pts;
      result = avcodec_decode_video2(av_video_context(), video_buffer_.get(),
                                     &got_picture, &packet);
      if (!video_packets_.empty()) {
        video_packets_.pop();
      }

      EXPECT_GE(result, 0) << "Video decode error.";
      if (result < 0 || (got_picture == 0 && end_of_stream)) {
        return false;
      }

      if (got_picture) {
        // Express the frame duration in half-frame periods so repeat_pict
        // (given in half-frames) can extend it.
        AVRational doubled_time_base;
        doubled_time_base.den = av_video_stream()->r_frame_rate.num;
        doubled_time_base.num = av_video_stream()->r_frame_rate.den;
        doubled_time_base.den *= 2;

        decoded_video_time_ =
            ConvertTimestamp(av_video_stream()->time_base,
                             video_buffer_->reordered_opaque)
                .InMicroseconds();
        decoded_video_duration_ =
            ConvertTimestamp(doubled_time_base,
                             2 + video_buffer_->repeat_pict)
                .InMicroseconds();
        return true;
      }
    }
  }

  void DecodeRemainingAudio() {
    while (StepDecodeAudio()) {}
  }

  void DecodeRemainingVideo() {
    while (StepDecodeVideo()) {}
  }

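  // Seeks the demuxer to |position| (in seconds) and flushes the packet queues
  // and codec buffers so decoding restarts cleanly from the new position.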
  void SeekTo(double position) {
    int64 seek_time = position * base::Time::kMicrosecondsPerSecond;
    int flags = AVSEEK_FLAG_BACKWARD;

    // Passing -1 as our stream index lets FFmpeg pick a default stream.
    // FFmpeg will attempt to use the lowest-index video stream, if present,
    // followed by the lowest-index audio stream.
    EXPECT_LE(0, av_seek_frame(av_format_context_, -1, seek_time, flags))
        << "Failed to seek to position " << position;
    Flush();
  }

  bool has_audio() { return audio_stream_index_ >= 0; }
  bool has_video() { return video_stream_index_ >= 0; }
  int64 decoded_audio_time() { return decoded_audio_time_; }
  int64 decoded_audio_duration() { return decoded_audio_duration_; }
  int64 decoded_video_time() { return decoded_video_time_; }
  int64 decoded_video_duration() { return decoded_video_duration_; }
  int64 duration() { return duration_; }

  AVFormatContext* av_format_context() {
    return av_format_context_;
  }
  AVStream* av_audio_stream() {
    return av_format_context_->streams[audio_stream_index_];
  }
  AVStream* av_video_stream() {
    return av_format_context_->streams[video_stream_index_];
  }
  AVCodecContext* av_audio_context() {
    return av_audio_stream()->codec;
  }
  AVCodecContext* av_video_context() {
    return av_video_stream()->codec;
  }

 private:
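  // One-time process-wide setup: loads the media library from the module
  // directory, registers FFmpeg's demuxers/decoders and our file protocol, and
  // silences all but fatal FFmpeg logging.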
  void InitializeFFmpeg() {
    static bool initialized = false;
    if (initialized) {
      return;
    }

    FilePath path;
    PathService::Get(base::DIR_MODULE, &path);
    EXPECT_TRUE(InitializeMediaLibrary(path))
        << "Could not initialize media library.";

    avcodec_init();
    av_log_set_level(AV_LOG_FATAL);
    av_register_all();
    av_register_protocol(&kFFmpegFileProtocol);
    initialized = true;
  }

  AVFormatContext* av_format_context_;
  int audio_stream_index_;
  int video_stream_index_;
  AVPacketQueue audio_packets_;
  AVPacketQueue video_packets_;

  scoped_ptr_malloc<int16, media::ScopedPtrAVFree> audio_buffer_;
  scoped_ptr_malloc<AVFrame, media::ScopedPtrAVFree> video_buffer_;

  int64 decoded_audio_time_;
  int64 decoded_audio_duration_;
  int64 decoded_video_time_;
  int64 decoded_video_duration_;
  int64 duration_;

  DISALLOW_COPY_AND_ASSIGN(FFmpegTest);
};

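// FFMPEG_TEST_CASE(name, extension) instantiates every FFmpegTest above for
// the file "name.extension" under media/test/data/content/.  For example,
// FFMPEG_TEST_CASE(sync0, mp4) creates the instantiation sync0_mp4 running
// against "sync0.mp4".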
#define FFMPEG_TEST_CASE(name, extension) \
    INSTANTIATE_TEST_CASE_P(name##_##extension, FFmpegTest, \
                            testing::Values(#name "." #extension));

// Covers all our basic formats.
FFMPEG_TEST_CASE(sync0, mp4);
FFMPEG_TEST_CASE(sync0, ogv);
FFMPEG_TEST_CASE(sync0, webm);
FFMPEG_TEST_CASE(sync1, m4a);
FFMPEG_TEST_CASE(sync1, mp3);
FFMPEG_TEST_CASE(sync1, mp4);
FFMPEG_TEST_CASE(sync1, ogg);
FFMPEG_TEST_CASE(sync1, ogv);
FFMPEG_TEST_CASE(sync1, webm);
FFMPEG_TEST_CASE(sync2, m4a);
FFMPEG_TEST_CASE(sync2, mp3);
FFMPEG_TEST_CASE(sync2, mp4);
FFMPEG_TEST_CASE(sync2, ogg);
FFMPEG_TEST_CASE(sync2, ogv);
FFMPEG_TEST_CASE(sync2, webm);

// Covers our LayoutTest file.
FFMPEG_TEST_CASE(counting, ogv);

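// Times each phase of a typical playback session: opening, codec setup,
// demuxing, decoding, seeking back to zero, and teardown.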
TEST_P(FFmpegTest, Perf) {
  {
    PerfTimeLogger timer("Opening file");
    OpenFile(GetParam());
  }
  {
    PerfTimeLogger timer("Opening codecs");
    OpenCodecs();
  }
  {
    PerfTimeLogger timer("Reading file");
    ReadRemainingFile();
  }
  if (has_audio()) {
    PerfTimeLogger timer("Decoding audio");
    DecodeRemainingAudio();
  }
  if (has_video()) {
    PerfTimeLogger timer("Decoding video");
    DecodeRemainingVideo();
  }
  {
    PerfTimeLogger timer("Seeking to zero");
    SeekTo(0);
  }
  {
    PerfTimeLogger timer("Closing codecs");
    CloseCodecs();
  }
  {
    PerfTimeLogger timer("Closing file");
    CloseFile();
  }
}

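// Decoding the first few frames, seeking back to zero, and decoding again
// should reproduce exactly the same timestamps.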
TEST_P(FFmpegTest, Loop_Audio) {
  OpenAndReadFile(GetParam());
  if (!has_audio()) {
    return;
  }

  const int kSteps = 4;
  std::vector<int64> expected_timestamps;
  for (int i = 0; i < kSteps; ++i) {
    EXPECT_TRUE(StepDecodeAudio());
    expected_timestamps.push_back(decoded_audio_time());
  }

  SeekTo(0);
  ReadRemainingFile();

  for (int i = 0; i < kSteps; ++i) {
    EXPECT_TRUE(StepDecodeAudio());
    EXPECT_EQ(expected_timestamps[i], decoded_audio_time())
        << "Frame " << i << " had a mismatched timestamp.";
  }

  CloseCodecs();
  CloseFile();
}

TEST_P(FFmpegTest, Loop_Video) {
  OpenAndReadFile(GetParam());
  if (!has_video()) {
    return;
  }

  const int kSteps = 4;
  std::vector<int64> expected_timestamps;
  for (int i = 0; i < kSteps; ++i) {
    EXPECT_TRUE(StepDecodeVideo());
    expected_timestamps.push_back(decoded_video_time());
  }

  SeekTo(0);
  ReadRemainingFile();

  for (int i = 0; i < kSteps; ++i) {
    EXPECT_TRUE(StepDecodeVideo());
    EXPECT_EQ(expected_timestamps[i], decoded_video_time())
        << "Frame " << i << " had a mismatched timestamp.";
  }

  CloseCodecs();
  CloseFile();
}

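// Seeking to half a second before the end of the stream should still yield a
// decodable frame with a valid timestamp.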
TEST_P(FFmpegTest, Seek_Audio) {
  OpenAndReadFile(GetParam());
  if (!has_audio() || duration() < 500000) {
    return;
  }

  // duration() is in microseconds; SeekTo() takes seconds.  Seek to half a
  // second before the end of the stream.
  SeekTo(duration() / static_cast<double>(base::Time::kMicrosecondsPerSecond)
         - 0.5);
  ReadRemainingFile();

  EXPECT_TRUE(StepDecodeAudio());
  EXPECT_NE(static_cast<int64>(AV_NOPTS_VALUE), decoded_audio_time());

  CloseCodecs();
  CloseFile();
}

TEST_P(FFmpegTest, Seek_Video) {
  OpenAndReadFile(GetParam());
  if (!has_video() || duration() < 500000) {
    return;
  }

  // duration() is in microseconds; SeekTo() takes seconds.  Seek to half a
  // second before the end of the stream.
  SeekTo(duration() / static_cast<double>(base::Time::kMicrosecondsPerSecond)
         - 0.5);
  ReadRemainingFile();

  EXPECT_TRUE(StepDecodeVideo());
  EXPECT_NE(static_cast<int64>(AV_NOPTS_VALUE), decoded_video_time());

  CloseCodecs();
  CloseFile();
}

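// Decoded timestamps must increase strictly monotonically from start to
// finish.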
TEST_P(FFmpegTest, Decode_Audio) {
  OpenAndReadFile(GetParam());
  if (!has_audio()) {
    return;
  }

  int64 last_audio_time = AV_NOPTS_VALUE;
  while (StepDecodeAudio()) {
    ASSERT_GT(decoded_audio_time(), last_audio_time);
    last_audio_time = decoded_audio_time();
  }

  CloseCodecs();
  CloseFile();
}

TEST_P(FFmpegTest, Decode_Video) {
  OpenAndReadFile(GetParam());
  if (!has_video()) {
    return;
  }

  int64 last_video_time = AV_NOPTS_VALUE;
  while (StepDecodeVideo()) {
    ASSERT_GT(decoded_video_time(), last_video_time);
    last_video_time = decoded_video_time();
  }

  CloseCodecs();
  CloseFile();
}

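// The duration reported by the container/streams should agree with the
// timestamp of the last decoded frame to within half a second.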
TEST_P(FFmpegTest, Duration) {
  OpenAndReadFile(GetParam());

  if (has_audio()) {
    DecodeRemainingAudio();
  }

  if (has_video()) {
    DecodeRemainingVideo();
  }

  int64 max_time = std::max(decoded_audio_time() + decoded_audio_duration(),
                            decoded_video_time() + decoded_video_duration());

  // Compute the difference without abs(), which takes an int and would
  // truncate the 64-bit value.
  int64 delta = max_time - duration();
  if (delta < 0) {
    delta = -delta;
  }

  EXPECT_LE(delta, 500000) << "Duration is off by more than 0.5 seconds.";

  CloseCodecs();
  CloseFile();
}

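// Seeks forward and backward around previously decoded positions of test.ogv,
// verifying that a frame can still be decoded after each seek.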
TEST_F(FFmpegTest, VideoPlayedCollapse) {
  OpenFile("test.ogv");
  OpenCodecs();

  SeekTo(0.5);
  ReadRemainingFile();
  EXPECT_TRUE(StepDecodeVideo());
  LOG(INFO) << decoded_video_time();

  SeekTo(2.83);
  ReadRemainingFile();
  EXPECT_TRUE(StepDecodeVideo());
  LOG(INFO) << decoded_video_time();

  SeekTo(0.4);
  ReadRemainingFile();
  EXPECT_TRUE(StepDecodeVideo());
  LOG(INFO) << decoded_video_time();

  CloseCodecs();
  CloseFile();
}

}  // namespace media