OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // Test application that simulates a cast sender. Data can be either generated | 5 // Test application that simulates a cast sender. Data can be either generated |
6 // or read from a file. | 6 // or read from a file. |
7 | 7 |
8 #include <queue> | 8 #include <queue> |
9 | 9 |
10 #include "base/at_exit.h" | 10 #include "base/at_exit.h" |
(...skipping 137 matching lines...) |
148 synthetic_count_(0), | 148 synthetic_count_(0), |
149 clock_(clock), | 149 clock_(clock), |
150 audio_frame_count_(0), | 150 audio_frame_count_(0), |
151 video_frame_count_(0), | 151 video_frame_count_(0), |
152 weak_factory_(this), | 152 weak_factory_(this), |
153 av_format_context_(NULL), | 153 av_format_context_(NULL), |
154 audio_stream_index_(-1), | 154 audio_stream_index_(-1), |
155 playback_rate_(1.0), | 155 playback_rate_(1.0), |
156 video_stream_index_(-1), | 156 video_stream_index_(-1), |
157 video_frame_rate_numerator_(video_config.max_frame_rate), | 157 video_frame_rate_numerator_(video_config.max_frame_rate), |
158 video_frame_rate_denominator_(1) { | 158 video_frame_rate_denominator_(1), |
| 159 video_first_pts_(0), |
| 160 video_first_pts_set_(false) { |
159 audio_bus_factory_.reset(new TestAudioBusFactory(kAudioChannels, | 161 audio_bus_factory_.reset(new TestAudioBusFactory(kAudioChannels, |
160 kAudioSamplingFrequency, | 162 kAudioSamplingFrequency, |
161 kSoundFrequency, | 163 kSoundFrequency, |
162 kSoundVolume)); | 164 kSoundVolume)); |
163 const CommandLine* cmd = CommandLine::ForCurrentProcess(); | 165 const CommandLine* cmd = CommandLine::ForCurrentProcess(); |
164 int override_fps = 0; | 166 int override_fps = 0; |
165 if (base::StringToInt(cmd->GetSwitchValueASCII(kSwitchFps), | 167 if (base::StringToInt(cmd->GetSwitchValueASCII(kSwitchFps), |
166 &override_fps)) { | 168 &override_fps)) { |
167 video_config_.max_frame_rate = override_fps; | 169 video_config_.max_frame_rate = override_fps; |
168 video_frame_rate_numerator_ = override_fps; | 170 video_frame_rate_numerator_ = override_fps; |
(...skipping 112 matching lines...) |
281 void Start(scoped_refptr<AudioFrameInput> audio_frame_input, | 283 void Start(scoped_refptr<AudioFrameInput> audio_frame_input, |
282 scoped_refptr<VideoFrameInput> video_frame_input) { | 284 scoped_refptr<VideoFrameInput> video_frame_input) { |
283 audio_frame_input_ = audio_frame_input; | 285 audio_frame_input_ = audio_frame_input; |
284 video_frame_input_ = video_frame_input; | 286 video_frame_input_ = video_frame_input; |
285 | 287 |
286 LOG(INFO) << "Max Frame rate: " << video_config_.max_frame_rate; | 288 LOG(INFO) << "Max Frame rate: " << video_config_.max_frame_rate; |
287 LOG(INFO) << "Real Frame rate: " | 289 LOG(INFO) << "Real Frame rate: " |
288 << video_frame_rate_numerator_ << "/" | 290 << video_frame_rate_numerator_ << "/" |
289 << video_frame_rate_denominator_ << " fps."; | 291 << video_frame_rate_denominator_ << " fps."; |
290 LOG(INFO) << "Audio playback rate: " << playback_rate_; | 292 LOG(INFO) << "Audio playback rate: " << playback_rate_; |
| 293 |
| 294 if (!is_transcoding_audio() && !is_transcoding_video()) { |
| 295 // Send fake patterns. |
| 296 test_app_thread_proxy_->PostTask( |
| 297 FROM_HERE, |
| 298 base::Bind( |
| 299 &SendProcess::SendNextFakeFrame, |
| 300 base::Unretained(this))); |
| 301 return; |
| 302 } |
| 303 |
| 304 // Send transcoded streams. |
291 audio_algo_.Initialize(playback_rate_, audio_params_); | 305 audio_algo_.Initialize(playback_rate_, audio_params_); |
292 audio_algo_.FlushBuffers(); | 306 audio_algo_.FlushBuffers(); |
293 audio_fifo_input_bus_ = | 307 audio_fifo_input_bus_ = |
294 AudioBus::Create( | 308 AudioBus::Create( |
295 audio_params_.channels(), audio_params_.frames_per_buffer()); | 309 audio_params_.channels(), audio_params_.frames_per_buffer()); |
296 // Audio FIFO can carry all data from AudioRendererAlgorithm. | 310 // Audio FIFO can carry all data from AudioRendererAlgorithm. |
297 audio_fifo_.reset( | 311 audio_fifo_.reset( |
298 new AudioFifo(audio_params_.channels(), | 312 new AudioFifo(audio_params_.channels(), |
299 audio_algo_.QueueCapacity())); | 313 audio_algo_.QueueCapacity())); |
300 audio_resampler_.reset(new media::MultiChannelResampler( | 314 audio_resampler_.reset(new media::MultiChannelResampler( |
301 audio_params_.channels(), | 315 audio_params_.channels(), |
302 static_cast<double>(audio_params_.sample_rate()) / | 316 static_cast<double>(audio_params_.sample_rate()) / |
303 kAudioSamplingFrequency, | 317 kAudioSamplingFrequency, |
304 audio_params_.frames_per_buffer(), | 318 audio_params_.frames_per_buffer(), |
305 base::Bind(&SendProcess::ProvideData, base::Unretained(this)))); | 319 base::Bind(&SendProcess::ProvideData, base::Unretained(this)))); |
306 audio_decoded_ts_.reset( | |
307 new AudioTimestampHelper(audio_params_.sample_rate())); | |
308 audio_decoded_ts_->SetBaseTimestamp(base::TimeDelta()); | |
309 audio_scaled_ts_.reset( | |
310 new AudioTimestampHelper(audio_params_.sample_rate())); | |
311 audio_scaled_ts_->SetBaseTimestamp(base::TimeDelta()); | |
312 audio_resampled_ts_.reset( | |
313 new AudioTimestampHelper(kAudioSamplingFrequency)); | |
314 audio_resampled_ts_->SetBaseTimestamp(base::TimeDelta()); | |
315 test_app_thread_proxy_->PostTask( | 320 test_app_thread_proxy_->PostTask( |
316 FROM_HERE, | 321 FROM_HERE, |
317 base::Bind(&SendProcess::SendNextFrame, base::Unretained(this))); | 322 base::Bind( |
| 323 &SendProcess::SendNextFrame, |
| 324 base::Unretained(this))); |
318 } | 325 } |
319 | 326 |
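| // Sends one synthetic black video frame plus enough fake audio to cover |
| // it, then reschedules itself for the next frame time. |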
320 void SendNextFrame() { | 327 void SendNextFakeFrame() { |
321 gfx::Size size(video_config_.width, video_config_.height); | 328 gfx::Size size(video_config_.width, video_config_.height); |
322 scoped_refptr<VideoFrame> video_frame = | 329 scoped_refptr<VideoFrame> video_frame = |
323 VideoFrame::CreateBlackFrame(size); | 330 VideoFrame::CreateBlackFrame(size); |
324 if (is_transcoding_video()) { | 331 PopulateVideoFrame(video_frame, synthetic_count_); |
325 Decode(false); | |
326 CHECK(!video_frame_queue_.empty()) << "No video frame."; | |
327 scoped_refptr<VideoFrame> decoded_frame = | |
328 video_frame_queue_.front(); | |
329 video_frame->set_timestamp(decoded_frame->timestamp()); | |
330 video_frame_queue_.pop(); | |
331 media::CopyPlane(VideoFrame::kYPlane, | |
332 decoded_frame->data(VideoFrame::kYPlane), | |
333 decoded_frame->stride(VideoFrame::kYPlane), | |
334 decoded_frame->rows(VideoFrame::kYPlane), | |
335 video_frame); | |
336 media::CopyPlane(VideoFrame::kUPlane, | |
337 decoded_frame->data(VideoFrame::kUPlane), | |
338 decoded_frame->stride(VideoFrame::kUPlane), | |
339 decoded_frame->rows(VideoFrame::kUPlane), | |
340 video_frame); | |
341 media::CopyPlane(VideoFrame::kVPlane, | |
342 decoded_frame->data(VideoFrame::kVPlane), | |
343 decoded_frame->stride(VideoFrame::kVPlane), | |
344 decoded_frame->rows(VideoFrame::kVPlane), | |
345 video_frame); | |
346 } else { | |
347 PopulateVideoFrame(video_frame, synthetic_count_); | |
348 } | |
349 ++synthetic_count_; | 332 ++synthetic_count_; |
350 | 333 |
351 base::TimeTicks now = clock_->NowTicks(); | 334 base::TimeTicks now = clock_->NowTicks(); |
352 if (start_time_.is_null()) | 335 if (start_time_.is_null()) |
353 start_time_ = now; | 336 start_time_ = now; |
354 | 337 |
355 base::TimeDelta video_time; | 338 base::TimeDelta video_time = VideoFrameTime(video_frame_count_); |
356 if (is_transcoding_video()) { | |
357 // Use the timestamp from the file if we're transcoding and | |
358 // playback rate is 1.0. | |
359 video_time = ScaleTimestamp(video_frame->timestamp()); | |
360 } else { | |
361 VideoFrameTime(video_frame_count_); | |
362 } | |
363 | |
364 video_frame->set_timestamp(video_time); | 339 video_frame->set_timestamp(video_time); |
365 video_frame_input_->InsertRawVideoFrame(video_frame, | 340 video_frame_input_->InsertRawVideoFrame(video_frame, |
366 start_time_ + video_time); | 341 start_time_ + video_time); |
367 | 342 |
368 if (is_transcoding_video()) { | |
369 // Decode next video frame to get the next frame's timestamp. | |
370 Decode(false); | |
371 CHECK(!video_frame_queue_.empty()) << "No video frame."; | |
372 video_time = ScaleTimestamp(video_frame_queue_.front()->timestamp()); | |
373 } else { | |
374 video_time = VideoFrameTime(++video_frame_count_); | |
375 } | |
376 | |
377 // Send just enough audio data to match next video frame's time. | 343 // Send just enough audio data to match next video frame's time. |
378 base::TimeDelta audio_time = AudioFrameTime(audio_frame_count_); | 344 base::TimeDelta audio_time = AudioFrameTime(audio_frame_count_); |
379 while (audio_time < video_time) { | 345 while (audio_time < video_time) { |
380 if (is_transcoding_audio()) { | 346 if (is_transcoding_audio()) { |
381 Decode(true); | 347 Decode(true); |
382 CHECK(!audio_bus_queue_.empty()) << "No audio decoded."; | 348 CHECK(!audio_bus_queue_.empty()) << "No audio decoded."; |
383 scoped_ptr<AudioBus> bus(audio_bus_queue_.front()); | 349 scoped_ptr<AudioBus> bus(audio_bus_queue_.front()); |
384 audio_bus_queue_.pop(); | 350 audio_bus_queue_.pop(); |
385 audio_frame_input_->InsertAudio( | 351 audio_frame_input_->InsertAudio( |
386 bus.Pass(), start_time_ + audio_time); | 352 bus.Pass(), start_time_ + audio_time); |
387 } else { | 353 } else { |
388 audio_frame_input_->InsertAudio( | 354 audio_frame_input_->InsertAudio( |
389 audio_bus_factory_->NextAudioBus( | 355 audio_bus_factory_->NextAudioBus( |
390 base::TimeDelta::FromMilliseconds(kAudioFrameMs)), | 356 base::TimeDelta::FromMilliseconds(kAudioFrameMs)), |
391 start_time_ + audio_time); | 357 start_time_ + audio_time); |
392 } | 358 } |
393 audio_time = AudioFrameTime(++audio_frame_count_); | 359 audio_time = AudioFrameTime(++audio_frame_count_); |
394 } | 360 } |
395 | 361 |
396 // This is the time since the stream started. | 362 // This is the time since the stream started. |
397 const base::TimeDelta elapsed_time = now - start_time_; | 363 const base::TimeDelta elapsed_time = now - start_time_; |
398 | 364 |
399 // Handle the case when decoding or frame generation cannot keep up. | 365 // Handle the case when frame generation cannot keep up. |
400 // Move the time ahead to match the next frame. | 366 // Move the time ahead to match the next frame. |
401 while (video_time < elapsed_time) { | 367 while (video_time < elapsed_time) { |
402 LOG(WARNING) << "Skipping one frame."; | 368 LOG(WARNING) << "Skipping one frame."; |
403 video_time = VideoFrameTime(++video_frame_count_); | 369 video_time = VideoFrameTime(++video_frame_count_); |
404 } | 370 } |
405 | 371 |
406 test_app_thread_proxy_->PostDelayedTask( | 372 test_app_thread_proxy_->PostDelayedTask( |
407 FROM_HERE, | 373 FROM_HERE, |
408 base::Bind(&SendProcess::SendNextFrame, | 374 base::Bind(&SendProcess::SendNextFakeFrame, |
409 weak_factory_.GetWeakPtr()), | 375 weak_factory_.GetWeakPtr()), |
410 video_time - elapsed_time); | 376 video_time - elapsed_time); |
411 } | 377 } |
412 | 378 |
| 379 // Return true if a frame was sent. |
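| // A frame is sent only once elapsed_time has reached its timestamp; |
| // otherwise nothing is consumed from the queue. |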
| 380 bool SendNextTranscodedVideo(base::TimeDelta elapsed_time) { |
| 381 if (!is_transcoding_video()) |
| 382 return false; |
| 383 |
| 384 Decode(false); |
| 385 if (video_frame_queue_.empty()) |
| 386 return false; |
| 387 |
| 388 scoped_refptr<VideoFrame> decoded_frame = |
| 389 video_frame_queue_.front(); |
| 390 if (elapsed_time < decoded_frame->timestamp()) |
| 391 return false; |
| 392 |
| 393 gfx::Size size(video_config_.width, video_config_.height); |
| 394 scoped_refptr<VideoFrame> video_frame = |
| 395 VideoFrame::CreateBlackFrame(size); |
| 396 video_frame_queue_.pop(); |
| 397 media::CopyPlane(VideoFrame::kYPlane, |
| 398 decoded_frame->data(VideoFrame::kYPlane), |
| 399 decoded_frame->stride(VideoFrame::kYPlane), |
| 400 decoded_frame->rows(VideoFrame::kYPlane), |
| 401 video_frame); |
| 402 media::CopyPlane(VideoFrame::kUPlane, |
| 403 decoded_frame->data(VideoFrame::kUPlane), |
| 404 decoded_frame->stride(VideoFrame::kUPlane), |
| 405 decoded_frame->rows(VideoFrame::kUPlane), |
| 406 video_frame); |
| 407 media::CopyPlane(VideoFrame::kVPlane, |
| 408 decoded_frame->data(VideoFrame::kVPlane), |
| 409 decoded_frame->stride(VideoFrame::kVPlane), |
| 410 decoded_frame->rows(VideoFrame::kVPlane), |
| 411 video_frame); |
| 412 |
| 413 base::TimeDelta video_time; |
| 414 // Use the timestamp from the file if we're transcoding. |
| 415 video_time = ScaleTimestamp(decoded_frame->timestamp()); |
| 416 video_frame_input_->InsertRawVideoFrame( |
| 417 video_frame, start_time_ + video_time); |
| 418 |
| 419 // Decode ahead so the queue is not empty for the next call. |
| 420 Decode(false); |
| 421 return true; |
| 422 } |
| 423 |
| 424 // Return true if a frame was sent. |
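| // An audio bus is sent only once elapsed_time has reached the running |
| // send timestamp (audio_sent_ts_). |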
| 425 bool SendNextTranscodedAudio(base::TimeDelta elapsed_time) { |
| 426 if (!is_transcoding_audio()) |
| 427 return false; |
| 428 |
| 429 Decode(true); |
| 430 if (audio_bus_queue_.empty()) |
| 431 return false; |
| 432 |
| 433 base::TimeDelta audio_time = audio_sent_ts_->GetTimestamp(); |
| 434 if (elapsed_time < audio_time) |
| 435 return false; |
| 436 scoped_ptr<AudioBus> bus(audio_bus_queue_.front()); |
| 437 audio_bus_queue_.pop(); |
| 438 audio_sent_ts_->AddFrames(bus->frames()); |
| 439 audio_frame_input_->InsertAudio( |
| 440 bus.Pass(), start_time_ + audio_time); |
| 441 |
| 442 // Decode ahead so the queue is not empty for the next call. |
| 443 Decode(true); |
| 444 return true; |
| 445 } |
| 446 |
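| // Transcoding path: audio is paced by wall-clock time, video follows |
| // the audio already sent, and the file is rewound at end of stream. |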
| 447 void SendNextFrame() { |
| 448 if (start_time_.is_null()) |
| 449 start_time_ = clock_->NowTicks(); |
| 452 |
| 453 // Send as much as possible. Audio is sent according to |
| 454 // system time. |
| 455 while (SendNextTranscodedAudio(clock_->NowTicks() - start_time_)); |
| 456 |
| 457 // Video is sync'ed to audio. |
| 458 while (SendNextTranscodedVideo(audio_sent_ts_->GetTimestamp())); |
| 459 |
| 460 if (audio_bus_queue_.empty() && video_frame_queue_.empty()) { |
| 461 // Both queues being empty can only mean that we have reached |
| 462 // the end of the stream. |
| 463 LOG(INFO) << "Rewind."; |
| 464 Rewind(); |
| 465 start_time_ = base::TimeTicks(); |
| 466 audio_sent_ts_.reset(); |
| 467 video_first_pts_set_ = false; |
| 468 } |
| 469 |
| 470 // Schedule the next send. |
| 471 test_app_thread_proxy_->PostDelayedTask( |
| 472 FROM_HERE, |
| 473 base::Bind( |
| 474 &SendProcess::SendNextFrame, |
| 475 base::Unretained(this)), |
| 476 base::TimeDelta::FromMilliseconds(kAudioFrameMs)); |
| 477 } |
| 478 |
413 const VideoSenderConfig& get_video_config() const { return video_config_; } | 479 const VideoSenderConfig& get_video_config() const { return video_config_; } |
414 | 480 |
415 private: | 481 private: |
416 bool is_transcoding_audio() { return audio_stream_index_ >= 0; } | 482 bool is_transcoding_audio() { return audio_stream_index_ >= 0; } |
417 bool is_transcoding_video() { return video_stream_index_ >= 0; } | 483 bool is_transcoding_video() { return video_stream_index_ >= 0; } |
418 | 484 |
419 // Helper methods to compute timestamps for the frame number specified. | 485 // Helper methods to compute timestamps for the frame number specified. |
420 base::TimeDelta VideoFrameTime(int frame_number) { | 486 base::TimeDelta VideoFrameTime(int frame_number) { |
421 return frame_number * base::TimeDelta::FromSeconds(1) * | 487 return frame_number * base::TimeDelta::FromSeconds(1) * |
422 video_frame_rate_denominator_ / video_frame_rate_numerator_; | 488 video_frame_rate_denominator_ / video_frame_rate_numerator_; |
(...skipping 11 matching lines...) |
434 // Go to the beginning of the stream. | 500 // Go to the beginning of the stream. |
435 void Rewind() { | 501 void Rewind() { |
436 CHECK(av_seek_frame(av_format_context_, -1, 0, AVSEEK_FLAG_BACKWARD) >= 0) | 502 CHECK(av_seek_frame(av_format_context_, -1, 0, AVSEEK_FLAG_BACKWARD) >= 0) |
437 << "Failed to rewind to the beginning."; | 503 << "Failed to rewind to the beginning."; |
438 } | 504 } |
439 | 505 |
440 // Call FFmpeg to fetch one packet. | 506 // Call FFmpeg to fetch one packet. |
441 ScopedAVPacket DemuxOnePacket(bool* audio) { | 507 ScopedAVPacket DemuxOnePacket(bool* audio) { |
442 ScopedAVPacket packet(new AVPacket()); | 508 ScopedAVPacket packet(new AVPacket()); |
443 if (av_read_frame(av_format_context_, packet.get()) < 0) { | 509 if (av_read_frame(av_format_context_, packet.get()) < 0) { |
444 LOG(ERROR) << "Failed to read one AVPacket"; | 510 LOG(ERROR) << "Failed to read one AVPacket."; |
445 packet.reset(); | 511 packet.reset(); |
446 return packet.Pass(); | 512 return packet.Pass(); |
447 } | 513 } |
448 | 514 |
449 int stream_index = static_cast<int>(packet->stream_index); | 515 int stream_index = static_cast<int>(packet->stream_index); |
450 if (stream_index == audio_stream_index_) { | 516 if (stream_index == audio_stream_index_) { |
451 *audio = true; | 517 *audio = true; |
452 } else if (stream_index == video_stream_index_) { | 518 } else if (stream_index == video_stream_index_) { |
453 *audio = false; | 519 *audio = false; |
454 } else { | 520 } else { |
(...skipping 19 matching lines...) |
474 CHECK(result >= 0) << "Failed to decode audio."; | 540 CHECK(result >= 0) << "Failed to decode audio."; |
475 packet_temp.size -= result; | 541 packet_temp.size -= result; |
476 packet_temp.data += result; | 542 packet_temp.data += result; |
477 if (!frame_decoded) | 543 if (!frame_decoded) |
478 continue; | 544 continue; |
479 | 545 |
480 int frames_read = avframe->nb_samples; | 546 int frames_read = avframe->nb_samples; |
481 if (frames_read < 0) | 547 if (frames_read < 0) |
482 break; | 548 break; |
483 | 549 |
| 550 if (!audio_sent_ts_) { |
| 551 // Initialize the base time to the first packet in the file. |
| 552 // The helper uses the frequency we send to the receiver, not |
| 553 // the frequency of the source file, because we increment the |
| 554 // frame count by the number of samples sent. |
| 555 audio_sent_ts_.reset( |
| 556 new AudioTimestampHelper(kAudioSamplingFrequency)); |
| 557 // Some files have an invalid first timestamp, so start the base at zero. |
| 558 base::TimeDelta base_ts; |
| 559 audio_sent_ts_->SetBaseTimestamp(base_ts); |
| 560 } |
| 561 |
484 scoped_refptr<AudioBuffer> buffer = | 562 scoped_refptr<AudioBuffer> buffer = |
485 AudioBuffer::CopyFrom( | 563 AudioBuffer::CopyFrom( |
486 AVSampleFormatToSampleFormat( | 564 AVSampleFormatToSampleFormat( |
487 av_audio_context()->sample_fmt), | 565 av_audio_context()->sample_fmt), |
488 ChannelLayoutToChromeChannelLayout( | 566 ChannelLayoutToChromeChannelLayout( |
489 av_audio_context()->channel_layout, | 567 av_audio_context()->channel_layout, |
490 av_audio_context()->channels), | 568 av_audio_context()->channels), |
491 av_audio_context()->channels, | 569 av_audio_context()->channels, |
492 av_audio_context()->sample_rate, | 570 av_audio_context()->sample_rate, |
493 frames_read, | 571 frames_read, |
494 &avframe->data[0], | 572 &avframe->data[0], |
495 audio_decoded_ts_->GetTimestamp(), | 573 // Note: Not all files have correct values for pkt_pts. |
496 audio_decoded_ts_->GetFrameDuration(frames_read)); | 574 base::TimeDelta::FromMilliseconds(avframe->pkt_pts), |
| 575 // TODO(hclam): Give accurate duration based on samples. |
| 576 base::TimeDelta()); |
497 audio_algo_.EnqueueBuffer(buffer); | 577 audio_algo_.EnqueueBuffer(buffer); |
498 audio_decoded_ts_->AddFrames(frames_read); | |
499 } while (packet_temp.size > 0); | 578 } while (packet_temp.size > 0); |
500 avcodec_free_frame(&avframe); | 579 avcodec_free_frame(&avframe); |
501 | 580 |
502 const int frames_needed_to_scale = | 581 const int frames_needed_to_scale = |
503 playback_rate_ * av_audio_context()->sample_rate / | 582 playback_rate_ * av_audio_context()->sample_rate / |
504 kAudioPacketsPerSecond; | 583 kAudioPacketsPerSecond; |
505 while (frames_needed_to_scale <= audio_algo_.frames_buffered()) { | 584 while (frames_needed_to_scale <= audio_algo_.frames_buffered()) { |
506 if (!audio_algo_.FillBuffer(audio_fifo_input_bus_.get(), | 585 if (!audio_algo_.FillBuffer(audio_fifo_input_bus_.get(), |
507 audio_fifo_input_bus_->frames())) { | 586 audio_fifo_input_bus_->frames())) { |
508 // Nothing can be scaled. Decode some more. | 587 // Nothing can be scaled. Decode some more. |
509 return; | 588 return; |
510 } | 589 } |
511 audio_scaled_ts_->AddFrames(audio_fifo_input_bus_->frames()); | |
512 | 590 |
513 // Prevent overflow of audio data in the FIFO. | 591 // Prevent overflow of audio data in the FIFO. |
514 if (audio_fifo_input_bus_->frames() + audio_fifo_->frames() | 592 if (audio_fifo_input_bus_->frames() + audio_fifo_->frames() |
515 <= audio_fifo_->max_frames()) { | 593 <= audio_fifo_->max_frames()) { |
516 audio_fifo_->Push(audio_fifo_input_bus_.get()); | 594 audio_fifo_->Push(audio_fifo_input_bus_.get()); |
517 } else { | 595 } else { |
518 LOG(WARNING) << "Audio FIFO full; dropping samples."; | 596 LOG(WARNING) << "Audio FIFO full; dropping samples."; |
519 } | 597 } |
520 | 598 |
521 // Make sure there's enough data to resample audio. | 599 // Make sure there's enough data to resample audio. |
522 if (audio_fifo_->frames() < | 600 if (audio_fifo_->frames() < |
523 2 * audio_params_.sample_rate() / kAudioPacketsPerSecond) { | 601 2 * audio_params_.sample_rate() / kAudioPacketsPerSecond) { |
524 continue; | 602 continue; |
525 } | 603 } |
526 | 604 |
527 scoped_ptr<media::AudioBus> resampled_bus( | 605 scoped_ptr<media::AudioBus> resampled_bus( |
528 media::AudioBus::Create( | 606 media::AudioBus::Create( |
529 audio_params_.channels(), | 607 audio_params_.channels(), |
530 kAudioSamplingFrequency / kAudioPacketsPerSecond)); | 608 kAudioSamplingFrequency / kAudioPacketsPerSecond)); |
531 audio_resampler_->Resample(resampled_bus->frames(), | 609 audio_resampler_->Resample(resampled_bus->frames(), |
532 resampled_bus.get()); | 610 resampled_bus.get()); |
533 audio_resampled_ts_->AddFrames(resampled_bus->frames()); | |
534 audio_bus_queue_.push(resampled_bus.release()); | 611 audio_bus_queue_.push(resampled_bus.release()); |
535 } | 612 } |
536 } | 613 } |
537 | 614 |
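| // Decodes one video packet and queues the frame with a timestamp |
| // relative to the first PTS seen in the stream. |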
538 void DecodeVideo(ScopedAVPacket packet) { | 615 void DecodeVideo(ScopedAVPacket packet) { |
539 // Video. | 616 // Video. |
540 int got_picture; | 617 int got_picture; |
541 AVFrame* avframe = av_frame_alloc(); | 618 AVFrame* avframe = av_frame_alloc(); |
542 avcodec_get_frame_defaults(avframe); | 619 avcodec_get_frame_defaults(avframe); |
543 // Tell the decoder to reorder for us. | 620 // Tell the decoder to reorder for us. |
544 avframe->reordered_opaque = | 621 avframe->reordered_opaque = |
545 av_video_context()->reordered_opaque = packet->pts; | 622 av_video_context()->reordered_opaque = packet->pts; |
546 CHECK(avcodec_decode_video2( | 623 CHECK(avcodec_decode_video2( |
547 av_video_context(), avframe, &got_picture, packet.get()) >= 0) | 624 av_video_context(), avframe, &got_picture, packet.get()) >= 0) |
548 << "Video decode error."; | 625 << "Video decode error."; |
549 if (!got_picture) | 626 if (!got_picture) |
550 return; | 627 return; |
551 gfx::Size size(av_video_context()->width, av_video_context()->height); | 628 gfx::Size size(av_video_context()->width, av_video_context()->height); |
| 629 if (!video_first_pts_set_ || |
| 630 avframe->reordered_opaque < video_first_pts_) { |
| 631 video_first_pts_set_ = true; |
| 632 video_first_pts_ = avframe->reordered_opaque; |
| 633 } |
| 634 int64 pts = avframe->reordered_opaque - video_first_pts_; |
552 video_frame_queue_.push( | 635 video_frame_queue_.push( |
553 VideoFrame::WrapExternalYuvData( | 636 VideoFrame::WrapExternalYuvData( |
554 media::VideoFrame::YV12, | 637 media::VideoFrame::YV12, |
555 size, | 638 size, |
556 gfx::Rect(size), | 639 gfx::Rect(size), |
557 size, | 640 size, |
558 avframe->linesize[0], | 641 avframe->linesize[0], |
559 avframe->linesize[1], | 642 avframe->linesize[1], |
560 avframe->linesize[2], | 643 avframe->linesize[2], |
561 avframe->data[0], | 644 avframe->data[0], |
562 avframe->data[1], | 645 avframe->data[1], |
563 avframe->data[2], | 646 avframe->data[2], |
564 base::TimeDelta::FromMilliseconds(avframe->reordered_opaque), | 647 base::TimeDelta::FromMilliseconds(pts), |
565 base::Bind(&AVFreeFrame, avframe))); | 648 base::Bind(&AVFreeFrame, avframe))); |
566 } | 649 } |
567 | 650 |
568 void Decode(bool decode_audio) { | 651 void Decode(bool decode_audio) { |
569 // Read the stream until one audio bus or video frame can be decoded. | 652 // Read the stream until one audio bus or video frame can be decoded. |
570 while (true) { | 653 while (true) { |
571 if (decode_audio && !audio_bus_queue_.empty()) | 654 if (decode_audio && !audio_bus_queue_.empty()) |
572 return; | 655 return; |
573 if (!decode_audio && !video_frame_queue_.empty()) | 656 if (!decode_audio && !video_frame_queue_.empty()) |
574 return; | 657 return; |
575 | 658 |
576 bool audio_packet = false; | 659 bool audio_packet = false; |
577 ScopedAVPacket packet = DemuxOnePacket(&audio_packet); | 660 ScopedAVPacket packet = DemuxOnePacket(&audio_packet); |
578 if (!packet) { | 661 if (!packet) { |
579 LOG(INFO) << "End of stream; Rewind."; | 662 LOG(INFO) << "End of stream."; |
580 Rewind(); | 663 return; |
581 continue; | |
582 } | 664 } |
583 | 665 |
584 if (audio_packet) | 666 if (audio_packet) |
585 DecodeAudio(packet.Pass()); | 667 DecodeAudio(packet.Pass()); |
586 else | 668 else |
587 DecodeVideo(packet.Pass()); | 669 DecodeVideo(packet.Pass()); |
588 } | 670 } |
589 } | 671 } |
590 | 672 |
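| // Read callback for the audio resampler (bound in Start()). |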
591 void ProvideData(int frame_delay, media::AudioBus* output_bus) { | 673 void ProvideData(int frame_delay, media::AudioBus* output_bus) { |
(...skipping 13 matching lines...) |
605 } | 687 } |
606 AVCodecContext* av_audio_context() { return av_audio_stream()->codec; } | 688 AVCodecContext* av_audio_context() { return av_audio_stream()->codec; } |
607 AVCodecContext* av_video_context() { return av_video_stream()->codec; } | 689 AVCodecContext* av_video_context() { return av_video_stream()->codec; } |
608 | 690 |
609 scoped_refptr<base::SingleThreadTaskRunner> test_app_thread_proxy_; | 691 scoped_refptr<base::SingleThreadTaskRunner> test_app_thread_proxy_; |
610 VideoSenderConfig video_config_; | 692 VideoSenderConfig video_config_; |
611 scoped_refptr<AudioFrameInput> audio_frame_input_; | 693 scoped_refptr<AudioFrameInput> audio_frame_input_; |
612 scoped_refptr<VideoFrameInput> video_frame_input_; | 694 scoped_refptr<VideoFrameInput> video_frame_input_; |
613 uint8 synthetic_count_; | 695 uint8 synthetic_count_; |
614 base::TickClock* const clock_; // Not owned by this class. | 696 base::TickClock* const clock_; // Not owned by this class. |
| 697 |
| 698 // Time when the stream starts. |
615 base::TimeTicks start_time_; | 699 base::TimeTicks start_time_; |
| 700 |
| 701 // The following three members are used only for fake frames. |
616 int audio_frame_count_; // Each audio frame is exactly 10ms. | 702 int audio_frame_count_; // Each audio frame is exactly 10ms. |
617 int video_frame_count_; | 703 int video_frame_count_; |
618 scoped_ptr<TestAudioBusFactory> audio_bus_factory_; | 704 scoped_ptr<TestAudioBusFactory> audio_bus_factory_; |
619 | 705 |
620 // NOTE: Weak pointers must be invalidated before all other member variables. | 706 // NOTE: Weak pointers must be invalidated before all other member variables. |
621 base::WeakPtrFactory<SendProcess> weak_factory_; | 707 base::WeakPtrFactory<SendProcess> weak_factory_; |
622 | 708 |
623 base::MemoryMappedFile file_data_; | 709 base::MemoryMappedFile file_data_; |
624 scoped_ptr<InMemoryUrlProtocol> protocol_; | 710 scoped_ptr<InMemoryUrlProtocol> protocol_; |
625 scoped_ptr<FFmpegGlue> glue_; | 711 scoped_ptr<FFmpegGlue> glue_; |
626 AVFormatContext* av_format_context_; | 712 AVFormatContext* av_format_context_; |
627 | 713 |
628 int audio_stream_index_; | 714 int audio_stream_index_; |
629 AudioParameters audio_params_; | 715 AudioParameters audio_params_; |
630 double playback_rate_; | 716 double playback_rate_; |
631 | 717 |
632 int video_stream_index_; | 718 int video_stream_index_; |
633 int video_frame_rate_numerator_; | 719 int video_frame_rate_numerator_; |
634 int video_frame_rate_denominator_; | 720 int video_frame_rate_denominator_; |
635 | 721 |
636 // These are used for audio resampling. | 722 // These are used for audio resampling. |
637 scoped_ptr<media::MultiChannelResampler> audio_resampler_; | 723 scoped_ptr<media::MultiChannelResampler> audio_resampler_; |
638 scoped_ptr<media::AudioFifo> audio_fifo_; | 724 scoped_ptr<media::AudioFifo> audio_fifo_; |
639 scoped_ptr<media::AudioBus> audio_fifo_input_bus_; | 725 scoped_ptr<media::AudioBus> audio_fifo_input_bus_; |
640 media::AudioRendererAlgorithm audio_algo_; | 726 media::AudioRendererAlgorithm audio_algo_; |
641 | 727 |
642 // These helpers are used to track frames generated. | 728 // Track the timestamp of audio sent to the receiver. |
643 // They are: | 729 scoped_ptr<media::AudioTimestampHelper> audio_sent_ts_; |
644 // * Frames decoded from the file. | |
645 // * Frames scaled according to playback rate. | |
646 // * Frames resampled to output frequency. | |
647 scoped_ptr<media::AudioTimestampHelper> audio_decoded_ts_; | |
648 scoped_ptr<media::AudioTimestampHelper> audio_scaled_ts_; | |
649 scoped_ptr<media::AudioTimestampHelper> audio_resampled_ts_; | |
650 | 730 |
651 std::queue<scoped_refptr<VideoFrame> > video_frame_queue_; | 731 std::queue<scoped_refptr<VideoFrame> > video_frame_queue_; |
| 732 int64 video_first_pts_; |
| 733 bool video_first_pts_set_; |
| 734 |
652 std::queue<AudioBus*> audio_bus_queue_; | 735 std::queue<AudioBus*> audio_bus_queue_; |
653 | 736 |
654 DISALLOW_COPY_AND_ASSIGN(SendProcess); | 737 DISALLOW_COPY_AND_ASSIGN(SendProcess); |
655 }; | 738 }; |
656 | 739 |
657 } // namespace cast | 740 } // namespace cast |
658 } // namespace media | 741 } // namespace media |
659 | 742 |
660 namespace { | 743 namespace { |
661 void UpdateCastTransportStatus( | 744 void UpdateCastTransportStatus( |
(...skipping 217 matching lines...) |
879 base::Passed(&video_event_subscriber), | 962 base::Passed(&video_event_subscriber), |
880 base::Passed(&audio_event_subscriber), | 963 base::Passed(&audio_event_subscriber), |
881 base::Passed(&video_log_file), | 964 base::Passed(&video_log_file), |
882 base::Passed(&audio_log_file)), | 965 base::Passed(&audio_log_file)), |
883 base::TimeDelta::FromSeconds(logging_duration_seconds)); | 966 base::TimeDelta::FromSeconds(logging_duration_seconds)); |
884 send_process->Start(cast_sender->audio_frame_input(), | 967 send_process->Start(cast_sender->audio_frame_input(), |
885 cast_sender->video_frame_input()); | 968 cast_sender->video_frame_input()); |
886 io_message_loop.Run(); | 969 io_message_loop.Run(); |
887 return 0; | 970 return 0; |
888 } | 971 } |
OLD | NEW |