OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "chrome/renderer/media/cast_rtp_stream.h" | 5 #include "chrome/renderer/media/cast_rtp_stream.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/command_line.h" | 8 #include "base/command_line.h" |
9 #include "base/debug/trace_event.h" | 9 #include "base/debug/trace_event.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
11 #include "base/memory/weak_ptr.h" | 11 #include "base/memory/weak_ptr.h" |
12 #include "base/strings/stringprintf.h" | 12 #include "base/strings/stringprintf.h" |
13 #include "base/sys_info.h" | 13 #include "base/sys_info.h" |
14 #include "chrome/common/chrome_switches.h" | 14 #include "chrome/common/chrome_switches.h" |
15 #include "chrome/renderer/media/cast_session.h" | 15 #include "chrome/renderer/media/cast_session.h" |
16 #include "chrome/renderer/media/cast_udp_transport.h" | 16 #include "chrome/renderer/media/cast_udp_transport.h" |
17 #include "content/public/renderer/media_stream_audio_sink.h" | 17 #include "content/public/renderer/media_stream_audio_sink.h" |
18 #include "content/public/renderer/media_stream_video_sink.h" | 18 #include "content/public/renderer/media_stream_video_sink.h" |
19 #include "content/public/renderer/render_thread.h" | 19 #include "content/public/renderer/render_thread.h" |
20 #include "content/public/renderer/video_encode_accelerator.h" | 20 #include "content/public/renderer/video_encode_accelerator.h" |
21 #include "media/audio/audio_parameters.h" | 21 #include "media/audio/audio_parameters.h" |
22 #include "media/base/audio_bus.h" | 22 #include "media/base/audio_bus.h" |
| 23 #include "media/base/audio_converter.h" |
23 #include "media/base/audio_fifo.h" | 24 #include "media/base/audio_fifo.h" |
24 #include "media/base/bind_to_current_loop.h" | 25 #include "media/base/bind_to_current_loop.h" |
25 #include "media/base/multi_channel_resampler.h" | |
26 #include "media/base/video_frame.h" | 26 #include "media/base/video_frame.h" |
27 #include "media/cast/cast_config.h" | 27 #include "media/cast/cast_config.h" |
28 #include "media/cast/cast_defines.h" | 28 #include "media/cast/cast_defines.h" |
29 #include "media/cast/cast_sender.h" | 29 #include "media/cast/cast_sender.h" |
30 #include "media/cast/net/cast_transport_config.h" | 30 #include "media/cast/net/cast_transport_config.h" |
31 #include "third_party/WebKit/public/platform/WebMediaStreamSource.h" | 31 #include "third_party/WebKit/public/platform/WebMediaStreamSource.h" |
32 #include "ui/gfx/geometry/size.h" | 32 #include "ui/gfx/geometry/size.h" |
33 | 33 |
34 using media::cast::AudioSenderConfig; | 34 using media::cast::AudioSenderConfig; |
35 using media::cast::VideoSenderConfig; | 35 using media::cast::VideoSenderConfig; |
36 | 36 |
37 namespace { | 37 namespace { |
38 | 38 |
39 const char kCodecNameOpus[] = "OPUS"; | 39 const char kCodecNameOpus[] = "OPUS"; |
40 const char kCodecNameVp8[] = "VP8"; | 40 const char kCodecNameVp8[] = "VP8"; |
41 const char kCodecNameH264[] = "H264"; | 41 const char kCodecNameH264[] = "H264"; |
42 | 42 |
43 // To convert from kilobits per second to bits per second. | 43 // To convert from kilobits per second to bits per second. |
44 const int kBitrateMultiplier = 1000; | 44 const int kBitrateMultiplier = 1000; |
45 | 45 |
46 // This constant defines the number of sets of audio data to buffer | |
47 // in the FIFO. If the input audio and output data have different sampling | |
48 // rates, then a buffer is necessary to avoid audio glitches. | |
49 // See CastAudioSink::ResampleData() and CastAudioSink::OnSetFormat() | |
50 // for more details. | |
51 const int kBufferAudioData = 2; | |
52 | |
53 CastRtpPayloadParams DefaultOpusPayload() { | 46 CastRtpPayloadParams DefaultOpusPayload() { |
54 CastRtpPayloadParams payload; | 47 CastRtpPayloadParams payload; |
55 payload.payload_type = 127; | 48 payload.payload_type = 127; |
56 payload.max_latency_ms = media::cast::kDefaultRtpMaxDelayMs; | 49 payload.max_latency_ms = media::cast::kDefaultRtpMaxDelayMs; |
57 payload.ssrc = 1; | 50 payload.ssrc = 1; |
58 payload.feedback_ssrc = 2; | 51 payload.feedback_ssrc = 2; |
59 payload.clock_rate = media::cast::kDefaultAudioSamplingRate; | 52 payload.clock_rate = media::cast::kDefaultAudioSamplingRate; |
60 // The value is 0, which means VBR. | 53 // The value is 0, which means VBR. |
61 payload.min_bitrate = payload.max_bitrate = | 54 payload.min_bitrate = payload.max_bitrate = |
62 media::cast::kDefaultAudioEncoderBitrate; | 55 media::cast::kDefaultAudioEncoderBitrate; |
(...skipping 277 matching lines...)
340 DISALLOW_COPY_AND_ASSIGN(CastVideoSink); | 333 DISALLOW_COPY_AND_ASSIGN(CastVideoSink); |
341 }; | 334 }; |
342 | 335 |
343 // Receives audio data from a MediaStreamTrack. Data is submitted to | 336 // Receives audio data from a MediaStreamTrack. Data is submitted to |
344 // media::cast::FrameInput. | 337 // media::cast::FrameInput. |
345 // | 338 // |
346 // Threading: Audio frames are received on the real-time audio thread. | 339 // Threading: Audio frames are received on the real-time audio thread. |
347 // Note that RemoveFromAudioTrack() is synchronous and we have | 340 // Note that RemoveFromAudioTrack() is synchronous and we have |
348 // a guarantee that there will be no more audio data after calling it. | 341 // a guarantee that there will be no more audio data after calling it. |
349 class CastAudioSink : public base::SupportsWeakPtr<CastAudioSink>, | 342 class CastAudioSink : public base::SupportsWeakPtr<CastAudioSink>, |
350 public content::MediaStreamAudioSink { | 343 public content::MediaStreamAudioSink, |
| 344 public media::AudioConverter::InputCallback { |
351 public: | 345 public: |
352 // |track| provides data for this sink. | 346 // |track| provides data for this sink. |
353 // |error_callback| is called if audio formats don't match. | 347 // |error_callback| is called if audio formats don't match. |
354 CastAudioSink(const blink::WebMediaStreamTrack& track, | 348 CastAudioSink(const blink::WebMediaStreamTrack& track, |
355 const CastRtpStream::ErrorCallback& error_callback, | |
356 int output_channels, | 349 int output_channels, |
357 int output_sample_rate) | 350 int output_sample_rate) |
358 : track_(track), | 351 : track_(track), |
359 sink_added_(false), | |
360 error_callback_(error_callback), | |
361 weak_factory_(this), | |
362 output_channels_(output_channels), | 352 output_channels_(output_channels), |
363 output_sample_rate_(output_sample_rate), | 353 output_sample_rate_(output_sample_rate), |
364 input_preroll_(0) {} | 354 sample_frames_in_(0), |
| 355 sample_frames_out_(0) {} |
365 | 356 |
366 ~CastAudioSink() override { | 357 ~CastAudioSink() override { |
367 if (sink_added_) | 358 if (frame_input_.get()) |
368 RemoveFromAudioTrack(this, track_); | 359 RemoveFromAudioTrack(this, track_); |
369 } | 360 } |
370 | 361 |
371 // Called on real-time audio thread. | |
372 // content::MediaStreamAudioSink implementation. | |
373 void OnData(const int16* audio_data, | |
374 int sample_rate, | |
375 int number_of_channels, | |
376 int number_of_frames) override { | |
377 scoped_ptr<media::AudioBus> input_bus; | |
378 if (resampler_) { | |
379 input_bus = ResampleData( | |
380 audio_data, sample_rate, number_of_channels, number_of_frames); | |
381 if (!input_bus) | |
382 return; | |
383 } else { | |
384 input_bus = media::AudioBus::Create( | |
385 number_of_channels, number_of_frames); | |
386 input_bus->FromInterleaved( | |
387 audio_data, number_of_frames, number_of_channels); | |
388 } | |
389 | |
390 // TODO(hclam): Pass in the accurate capture time to have good | |
391 // audio / video sync. | |
392 frame_input_->InsertAudio(input_bus.Pass(), base::TimeTicks::Now()); | |
393 } | |
394 | |
395 // Returns resampled audio data from the input. This is called when the | |
396 // input sample rate doesn't match the output. | |
397 // The flow of data is as follows: | |
398 // |audio_data| -> | |
399 // AudioFifo |fifo_| -> | |
400 // MultiChannelResampler |resampler|. | |
401 // | |
402 // The resampler pulls data out of the FIFO and resamples the data in the | |
403 // frequency domain. It might call |fifo_| more than once, but no more | |
404 // than |kBufferAudioData| times. We preroll audio data into the FIFO to | |
405 // make sure there's enough data for resampling. | |
406 scoped_ptr<media::AudioBus> ResampleData( | |
407 const int16* audio_data, | |
408 int sample_rate, | |
409 int number_of_channels, | |
410 int number_of_frames) { | |
411 DCHECK_EQ(number_of_channels, output_channels_); | |
412 fifo_input_bus_->FromInterleaved( | |
413 audio_data, number_of_frames, number_of_channels); | |
414 fifo_->Push(fifo_input_bus_.get()); | |
415 | |
416 if (input_preroll_ < kBufferAudioData - 1) { | |
417 ++input_preroll_; | |
418 return scoped_ptr<media::AudioBus>(); | |
419 } | |
420 | |
421 scoped_ptr<media::AudioBus> output_bus( | |
422 media::AudioBus::Create( | |
423 output_channels_, | |
424 output_sample_rate_ * fifo_input_bus_->frames() / sample_rate)); | |
425 | |
426 // Resampler will then call ProvideData() below to fetch data from | |
427 // |fifo_|. | |
428 resampler_->Resample(output_bus->frames(), output_bus.get()); | |
429 return output_bus.Pass(); | |
430 } | |
431 | |
432 // Called on real-time audio thread. | |
433 void OnSetFormat(const media::AudioParameters& params) override { | |
434 if (params.sample_rate() == output_sample_rate_) | |
435 return; | |
436 fifo_.reset(new media::AudioFifo( | |
437 output_channels_, | |
438 kBufferAudioData * params.frames_per_buffer())); | |
439 fifo_input_bus_ = media::AudioBus::Create( | |
440 params.channels(), params.frames_per_buffer()); | |
441 resampler_.reset(new media::MultiChannelResampler( | |
442 output_channels_, | |
443 static_cast<double>(params.sample_rate()) / output_sample_rate_, | |
444 params.frames_per_buffer(), | |
445 base::Bind(&CastAudioSink::ProvideData, base::Unretained(this)))); | |
446 } | |
447 | |
448 // Add this sink to the track. Data received from the track will be | 362 // Add this sink to the track. Data received from the track will be |
449 // submitted to |frame_input|. | 363 // submitted to |frame_input|. |
450 void AddToTrack( | 364 void AddToTrack( |
451 const scoped_refptr<media::cast::AudioFrameInput>& frame_input) { | 365 const scoped_refptr<media::cast::AudioFrameInput>& frame_input) { |
452 DCHECK(!sink_added_); | 366 DCHECK(frame_input.get()); |
453 sink_added_ = true; | 367 DCHECK(!frame_input_.get()); |
454 | |
455 // This member is written here and then accessed on the IO thread. | 368 // This member is written here and then accessed on the IO thread. |
456 // We will not get data until AddToAudioTrack() is called, so it is | 369 // We will not get data until AddToAudioTrack() is called, so it is |
457 // safe to access this member now. | 370 // safe to access this member now. |
458 frame_input_ = frame_input; | 371 frame_input_ = frame_input; |
459 AddToAudioTrack(this, track_); | 372 AddToAudioTrack(this, track_); |
460 } | 373 } |
461 | 374 |
462 void ProvideData(int frame_delay, media::AudioBus* output_bus) { | 375 protected: |
463 fifo_->Consume(output_bus, 0, output_bus->frames()); | 376 // Called on real-time audio thread. |
| 377 // TODO(miu): This interface is horrible: The first arg should be an AudioBus, |
| 378 // while the remaining three are redundant as they are provided in the call to |
| 379 // OnSetFormat(). http://crbug.com/437064 |
| 380 void OnData(const int16* audio_data, |
| 381 int sample_rate, |
| 382 int number_of_channels, |
| 383 int number_of_sample_frames) override { |
| 384 DCHECK(audio_data); |
| 385 DCHECK_EQ(sample_rate, input_params_.sample_rate()); |
| 386 DCHECK_EQ(number_of_channels, input_params_.channels()); |
| 387 DCHECK_EQ(number_of_sample_frames, input_params_.frames_per_buffer()); |
| 388 |
| 389 // TODO(miu): Plumbing is needed to determine the actual reference timestamp |
| 390 // of the audio for proper audio/video sync. http://crbug.com/335335 |
| 391 base::TimeTicks reference_time = base::TimeTicks::Now(); |
| 392 |
| 393 if (converter_) { |
| 394 // Make an adjustment to the |reference_time| to account for the portion |
| 395 // of the audio signal enqueued within |fifo_| and |converter_|. |
| 396 const base::TimeDelta signal_duration_already_buffered = |
| 397 (sample_frames_in_ * base::TimeDelta::FromSeconds(1) / |
| 398 input_params_.sample_rate()) - |
| 399 (sample_frames_out_ * base::TimeDelta::FromSeconds(1) / |
| 400 output_sample_rate_); |
| 401 DVLOG(2) << "Audio reference time adjustment: -(" |
| 402 << signal_duration_already_buffered.InMicroseconds() << " us)"; |
| 403 reference_time -= signal_duration_already_buffered; |
| 404 |
| 405 // TODO(miu): Eliminate need for extra copying of samples to do |
| 406 // resampling. This will require AudioConverter changes. |
| 407 fifo_input_bus_->FromInterleaved( |
| 408 audio_data, input_params_.frames_per_buffer(), sizeof(audio_data[0])); |
| 409 const int fifo_frames_remaining = fifo_->max_frames() - fifo_->frames(); |
| 410 if (fifo_frames_remaining < input_params_.frames_per_buffer()) { |
| 411 NOTREACHED() |
| 412 << "Audio FIFO overrun: " << input_params_.frames_per_buffer() |
| 413 << " > " << fifo_frames_remaining; |
| 414 sample_frames_in_ -= fifo_->frames(); |
| 415 fifo_->Clear(); |
| 416 } |
| 417 fifo_->Push(fifo_input_bus_.get()); |
| 418 sample_frames_in_ += input_params_.frames_per_buffer(); |
| 419 |
| 420 const int sample_frames_out_per_chunk = |
| 421 output_sample_rate_ * input_params_.frames_per_buffer() / |
| 422 input_params_.sample_rate(); |
| 423 while (fifo_->frames() >= converter_->ChunkSize()) { |
| 424 scoped_ptr<media::AudioBus> audio_bus = media::AudioBus::Create( |
| 425 output_channels_, sample_frames_out_per_chunk); |
| 426 // AudioConverter will call ProvideInput() to fetch data from |fifo_|. |
| 427 converter_->Convert(audio_bus.get()); |
| 428 sample_frames_out_ += sample_frames_out_per_chunk; |
| 429 frame_input_->InsertAudio(audio_bus.Pass(), reference_time); |
| 430 reference_time += |
| 431 sample_frames_out_per_chunk * base::TimeDelta::FromSeconds(1) / |
| 432 output_sample_rate_; |
| 433 } |
| 434 } else { |
| 435 scoped_ptr<media::AudioBus> audio_bus = media::AudioBus::Create( |
| 436 input_params_.channels(), input_params_.frames_per_buffer()); |
| 437 audio_bus->FromInterleaved( |
| 438 audio_data, input_params_.frames_per_buffer(), sizeof(audio_data[0])); |
| 439 frame_input_->InsertAudio(audio_bus.Pass(), reference_time); |
| 440 } |
| 441 } |
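
The reference-time adjustment in OnData() above subtracts the duration of audio still queued in |fifo_| and |converter_| from the capture timestamp, so each emitted chunk is stamped with the time its first sample was captured. The following is a minimal standalone sketch of that arithmetic only; the sample counts and rates are hypothetical, and std::chrono stands in for base::TimeDelta so it compiles outside Chromium.

  #include <chrono>
  #include <cstdint>
  #include <iostream>

  int main() {
    // Hypothetical running totals, mirroring |sample_frames_in_| and
    // |sample_frames_out_| at the moment a new input buffer arrives.
    const int64_t sample_frames_in = 960;   // frames pushed into the FIFO so far
    const int64_t sample_frames_out = 441;  // frames already emitted to the sender
    const int input_sample_rate = 48000;    // capture rate reported by OnSetFormat()
    const int output_sample_rate = 44100;   // rate requested by the Cast sender

    const std::chrono::microseconds buffered_in(
        sample_frames_in * 1000000 / input_sample_rate);    // 20000 us
    const std::chrono::microseconds emitted_out(
        sample_frames_out * 1000000 / output_sample_rate);  // 10000 us

    // Audio still sitting in the FIFO/converter; the capture timestamp is
    // pushed back by this amount, as in
    // "reference_time -= signal_duration_already_buffered".
    const std::chrono::microseconds already_buffered = buffered_in - emitted_out;
    std::cout << "reference time adjustment: -" << already_buffered.count()
              << " us\n";
    return 0;
  }
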
| 442 |
| 443 // Called on real-time audio thread. |
| 444 void OnSetFormat(const media::AudioParameters& params) override { |
| 445 if (input_params_.Equals(params)) |
| 446 return; |
| 447 input_params_ = params; |
| 448 |
| 449 if (input_params_.channels() == output_channels_ && |
| 450 input_params_.sample_rate() == output_sample_rate_) { |
| 451 DVLOG(1) << "No audio resampling is needed."; |
| 452 converter_.reset(); |
| 453 fifo_input_bus_.reset(); |
| 454 fifo_.reset(); |
| 455 } else { |
| 456 DVLOG(1) << "Setting up audio resampling: {" |
| 457 << input_params_.channels() << " channels, " |
| 458 << input_params_.sample_rate() << " Hz} --> {" |
| 459 << output_channels_ << " channels, " |
| 460 << output_sample_rate_ << " Hz}"; |
| 461 const media::AudioParameters output_params( |
| 462 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 463 media::GuessChannelLayout(output_channels_), |
| 464 output_sample_rate_, 32, |
| 465 output_sample_rate_ * input_params_.frames_per_buffer() / |
| 466 input_params_.sample_rate()); |
| 467 converter_.reset( |
| 468 new media::AudioConverter(input_params_, output_params, false)); |
| 469 converter_->AddInput(this); |
| 470 fifo_input_bus_ = media::AudioBus::Create( |
| 471 input_params_.channels(), input_params_.frames_per_buffer()); |
| 472 fifo_.reset(new media::AudioFifo( |
| 473 input_params_.channels(), |
| 474 converter_->ChunkSize() + input_params_.frames_per_buffer())); |
| 475 sample_frames_in_ = 0; |
| 476 sample_frames_out_ = 0; |
| 477 } |
| 478 } |
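
OnSetFormat() above sizes the converter's output buffer so that one input buffer maps to a proportional chunk at the output rate, and sizes |fifo_| to hold one converter chunk plus one input buffer. A small sketch of that sizing arithmetic follows, with hypothetical formats; it only illustrates the integer math, and the chunk size is assumed to equal one input buffer, whereas the real value comes from media::AudioConverter::ChunkSize().

  #include <iostream>

  int main() {
    // Hypothetical input format reported by OnSetFormat().
    const int input_sample_rate = 48000;
    const int input_frames_per_buffer = 480;  // 10 ms at 48 kHz
    // Hypothetical output rate requested by the Cast sender.
    const int output_sample_rate = 44100;

    // Output buffer size used to build |output_params|:
    // 44100 * 480 / 48000 = 441 frames, i.e. 10 ms at the output rate.
    const int output_frames_per_buffer =
        output_sample_rate * input_frames_per_buffer / input_sample_rate;

    // FIFO capacity: one converter chunk plus one input buffer. The chunk
    // size is assumed here; AudioConverter::ChunkSize() supplies it for real.
    const int assumed_chunk_size = input_frames_per_buffer;
    const int fifo_capacity = assumed_chunk_size + input_frames_per_buffer;

    std::cout << "output frames per buffer: " << output_frames_per_buffer << "\n"
              << "fifo capacity (frames): " << fifo_capacity << "\n";
    return 0;
  }
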
| 479 |
| 480 // Called on real-time audio thread. |
| 481 double ProvideInput(media::AudioBus* audio_bus, |
| 482 base::TimeDelta buffer_delay) override { |
| 483 fifo_->Consume(audio_bus, 0, audio_bus->frames()); |
| 484 return 1.0; |
464 } | 485 } |
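
ProvideInput() is the pull half of the conversion: each Convert() call in OnData() drains one chunk from |fifo_| through this callback, and the 1.0 return value means unity volume. The toy below is a simplified, self-contained model of that pull pattern; it is not the media::AudioConverter API, and every name in it is illustrative.

  #include <deque>
  #include <functional>
  #include <iostream>
  #include <vector>

  // Toy stand-in for the converter's pull model: Convert() asks the registered
  // callback for the input it needs, like AudioConverter calling ProvideInput().
  class ToyConverter {
   public:
    using InputCallback = std::function<void(std::vector<float>*)>;
    explicit ToyConverter(InputCallback cb) : provide_input_(std::move(cb)) {}

    std::vector<float> Convert(size_t frames) {
      std::vector<float> input;
      provide_input_(&input);  // pull from the caller's FIFO
      input.resize(frames);    // stand-in for the actual resampling
      return input;
    }

   private:
    InputCallback provide_input_;
  };

  int main() {
    std::deque<float> fifo = {0.1f, 0.2f, 0.3f, 0.4f};  // hypothetical queued samples

    ToyConverter converter([&fifo](std::vector<float>* out) {
      // Analogous to fifo_->Consume(audio_bus, 0, audio_bus->frames()).
      out->assign(fifo.begin(), fifo.end());
      fifo.clear();
    });

    const std::vector<float> chunk = converter.Convert(4);
    std::cout << "converted " << chunk.size() << " frames\n";
    return 0;
  }
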
465 | 486 |
466 private: | 487 private: |
467 blink::WebMediaStreamTrack track_; | 488 const blink::WebMediaStreamTrack track_; |
468 bool sink_added_; | |
469 CastRtpStream::ErrorCallback error_callback_; | |
470 base::WeakPtrFactory<CastAudioSink> weak_factory_; | |
471 | |
472 const int output_channels_; | 489 const int output_channels_; |
473 const int output_sample_rate_; | 490 const int output_sample_rate_; |
474 | 491 |
475 // These members are accessed on the real-time audio thread only. | 492 // This must be set before the real-time audio thread starts calling OnData(), |
| 493 // and must remain unchanged until the thread has stopped calling OnData(). |
476 scoped_refptr<media::cast::AudioFrameInput> frame_input_; | 494 scoped_refptr<media::cast::AudioFrameInput> frame_input_; |
477 scoped_ptr<media::MultiChannelResampler> resampler_; | 495 |
| 496 // These members are accessed on the real-time audio time only. |
| 497 media::AudioParameters input_params_; |
| 498 scoped_ptr<media::AudioConverter> converter_; |
| 499 scoped_ptr<media::AudioBus> fifo_input_bus_; |
478 scoped_ptr<media::AudioFifo> fifo_; | 500 scoped_ptr<media::AudioFifo> fifo_; |
479 scoped_ptr<media::AudioBus> fifo_input_bus_; | 501 int64 sample_frames_in_; |
480 int input_preroll_; | 502 int64 sample_frames_out_; |
481 | 503 |
482 DISALLOW_COPY_AND_ASSIGN(CastAudioSink); | 504 DISALLOW_COPY_AND_ASSIGN(CastAudioSink); |
483 }; | 505 }; |
484 | 506 |
485 CastRtpParams::CastRtpParams(const CastRtpPayloadParams& payload_params) | 507 CastRtpParams::CastRtpParams(const CastRtpPayloadParams& payload_params) |
486 : payload(payload_params) {} | 508 : payload(payload_params) {} |
487 | 509 |
488 CastCodecSpecificParams::CastCodecSpecificParams() {} | 510 CastCodecSpecificParams::CastCodecSpecificParams() {} |
489 | 511 |
490 CastCodecSpecificParams::~CastCodecSpecificParams() {} | 512 CastCodecSpecificParams::~CastCodecSpecificParams() {} |
(...skipping 45 matching lines...)
536 AudioSenderConfig config; | 558 AudioSenderConfig config; |
537 if (!ToAudioSenderConfig(params, &config)) { | 559 if (!ToAudioSenderConfig(params, &config)) { |
538 DidEncounterError("Invalid parameters for audio."); | 560 DidEncounterError("Invalid parameters for audio."); |
539 return; | 561 return; |
540 } | 562 } |
541 | 563 |
542 // In case of error we have to go through DidEncounterError() to stop | 564 // In case of error we have to go through DidEncounterError() to stop |
543 // the streaming after reporting the error. | 565 // the streaming after reporting the error. |
544 audio_sink_.reset(new CastAudioSink( | 566 audio_sink_.reset(new CastAudioSink( |
545 track_, | 567 track_, |
546 media::BindToCurrentLoop(base::Bind(&CastRtpStream::DidEncounterError, | |
547 weak_factory_.GetWeakPtr())), | |
548 params.payload.channels, | 568 params.payload.channels, |
549 params.payload.clock_rate)); | 569 params.payload.clock_rate)); |
550 cast_session_->StartAudio( | 570 cast_session_->StartAudio( |
551 config, | 571 config, |
552 base::Bind(&CastAudioSink::AddToTrack, audio_sink_->AsWeakPtr()), | 572 base::Bind(&CastAudioSink::AddToTrack, audio_sink_->AsWeakPtr()), |
553 base::Bind(&CastRtpStream::DidEncounterError, | 573 base::Bind(&CastRtpStream::DidEncounterError, |
554 weak_factory_.GetWeakPtr())); | 574 weak_factory_.GetWeakPtr())); |
555 start_callback.Run(); | 575 start_callback.Run(); |
556 } else { | 576 } else { |
557 VideoSenderConfig config; | 577 VideoSenderConfig config; |
(...skipping 52 matching lines...)
610 void CastRtpStream::DidEncounterError(const std::string& message) { | 630 void CastRtpStream::DidEncounterError(const std::string& message) { |
611 DVLOG(1) << "CastRtpStream::DidEncounterError(" << message << ") = " | 631 DVLOG(1) << "CastRtpStream::DidEncounterError(" << message << ") = " |
612 << (IsAudio() ? "audio" : "video"); | 632 << (IsAudio() ? "audio" : "video"); |
613 // Save the WeakPtr first because the error callback might delete this object. | 633 // Save the WeakPtr first because the error callback might delete this object. |
614 base::WeakPtr<CastRtpStream> ptr = weak_factory_.GetWeakPtr(); | 634 base::WeakPtr<CastRtpStream> ptr = weak_factory_.GetWeakPtr(); |
615 error_callback_.Run(message); | 635 error_callback_.Run(message); |
616 content::RenderThread::Get()->GetMessageLoop()->PostTask( | 636 content::RenderThread::Get()->GetMessageLoop()->PostTask( |
617 FROM_HERE, | 637 FROM_HERE, |
618 base::Bind(&CastRtpStream::Stop, ptr)); | 638 base::Bind(&CastRtpStream::Stop, ptr)); |
619 } | 639 } |
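
DidEncounterError() grabs the WeakPtr before running |error_callback_| because that callback may destroy the stream, and the follow-up Stop() is posted through the weak pointer so it silently becomes a no-op if the object is already gone. Below is a standalone analogue of that weak-pointer posting, using std::weak_ptr and a plain queue in place of base::WeakPtr and the renderer message loop; all names are illustrative.

  #include <functional>
  #include <iostream>
  #include <memory>
  #include <queue>

  // Toy stand-in for the renderer thread's message loop.
  std::queue<std::function<void()>> g_task_queue;

  struct Stream {
    void Stop() { std::cout << "Stop() ran\n"; }
  };

  int main() {
    auto stream = std::make_shared<Stream>();
    std::weak_ptr<Stream> weak_stream = stream;

    // Post the follow-up bound to the weak pointer, mirroring
    // PostTask(FROM_HERE, base::Bind(&CastRtpStream::Stop, ptr)).
    g_task_queue.push([weak_stream]() {
      if (auto self = weak_stream.lock())
        self->Stop();
      else
        std::cout << "task dropped: stream already destroyed\n";
    });

    // Simulate the error callback destroying the stream before the task runs.
    stream.reset();

    while (!g_task_queue.empty()) {  // drain the "message loop"
      g_task_queue.front()();
      g_task_queue.pop();
    }
    return 0;
  }
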