Index: chrome/renderer/media/cast_rtp_stream.cc
diff --git a/chrome/renderer/media/cast_rtp_stream.cc b/chrome/renderer/media/cast_rtp_stream.cc
index 36af2795018cf4dbe0760d444253cb8984be8602..577ed7549c2cb5c3fe9f3258b3fca015d0c700a1 100644
--- a/chrome/renderer/media/cast_rtp_stream.cc
+++ b/chrome/renderer/media/cast_rtp_stream.cc
@@ -12,8 +12,11 @@
#include "content/public/renderer/media_stream_audio_sink.h"
#include "content/public/renderer/media_stream_video_sink.h"
#include "content/public/renderer/render_thread.h"
+#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
+#include "media/base/audio_fifo.h"
#include "media/base/bind_to_current_loop.h"
+#include "media/base/multi_channel_resampler.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_sender.h"
@@ -24,9 +27,17 @@ using media::cast::AudioSenderConfig;
using media::cast::VideoSenderConfig;
namespace {
+
const char kCodecNameOpus[] = "OPUS";
const char kCodecNameVp8[] = "VP8";
+// This constant defines the number of sets of audio data to buffer
+// in the FIFO. If the input and output audio have different sample
+// rates then this buffering is necessary to avoid audio glitches.
+// See CastAudioSink::ResampleData() and CastAudioSink::OnSetFormat()
+// for more details.
+const int kBufferAudioData = 2;
+
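For a sense of scale, a hypothetical worked example (not part of the patch): with 10 ms input buffers at 44.1 kHz, the FIFO created in CastAudioSink::OnSetFormat() below ends up holding about 20 ms of audio.

    // Hypothetical input format: 10 ms buffers at 44.1 kHz.
    const int kInputSampleRate = 44100;
    const int kInputFramesPerBuffer = kInputSampleRate / 100;  // 441 frames per 10 ms.
    // OnSetFormat() sizes the FIFO as kBufferAudioData * frames_per_buffer().
    const int kFifoCapacityFrames =
        kBufferAudioData * kInputFramesPerBuffer;  // 882 frames, roughly 20 ms.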
CastRtpPayloadParams DefaultOpusPayload() {
CastRtpPayloadParams payload;
payload.ssrc = 1;
@@ -174,14 +185,18 @@ class CastAudioSink : public base::SupportsWeakPtr<CastAudioSink>,
// |track| provides data for this sink.
// |error_callback| is called if audio formats don't match.
CastAudioSink(const blink::WebMediaStreamTrack& track,
- const CastRtpStream::ErrorCallback& error_callback)
+ const CastRtpStream::ErrorCallback& error_callback,
+ int output_channels,
+ int output_sample_rate)
: track_(track),
sink_added_(false),
error_callback_(error_callback),
weak_factory_(this),
render_thread_task_runner_(content::RenderThread::Get()
- ->GetMessageLoop()
- ->message_loop_proxy()) {}
+ ->GetMessageLoop()
+ ->message_loop_proxy()),
+ output_channels_(output_channels),
+ output_sample_rate_(output_sample_rate) {}
virtual ~CastAudioSink() {
if (sink_added_)
@@ -194,9 +209,18 @@ class CastAudioSink : public base::SupportsWeakPtr<CastAudioSink>,
int sample_rate,
int number_of_channels,
int number_of_frames) OVERRIDE {
- scoped_ptr<media::AudioBus> audio_bus(
- media::AudioBus::Create(number_of_channels, number_of_frames));
- audio_bus->FromInterleaved(audio_data, number_of_frames, 2);
+ scoped_ptr<media::AudioBus> input_bus =
DaleCurtis 2014/03/07 01:14:03: Don't create this every time, it can be quite expe
Alpha Left Google 2014/03/07 01:34:01: I think the only way is to save on the allocation
DaleCurtis 2014/03/07 01:49:00: How about reflowing this block so that ResampleDat
+ media::AudioBus::Create(
+ number_of_channels, number_of_frames).Pass();
+ input_bus->FromInterleaved(
+ audio_data, number_of_frames, number_of_channels);
+
+ if (resampler_) {
+ DCHECK_EQ(number_of_channels, output_channels_);
+ input_bus = ResampleData(input_bus.Pass(), sample_rate);
+ if (!input_bus)
+ return;
+ }
// TODO(hclam): Pass in the accurate capture time to have good
// audio / video sync.
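A minimal sketch of the reuse DaleCurtis asks for above: allocate the AudioBus once when the format is known instead of on every OnData() call on the real-time audio thread. It assumes channel and frame counts only change through OnSetFormat(), and it glosses over the fact that the patch currently hands ownership of the bus to the render thread (via DeleteAudioBus), which would also need to change. The member |input_bus_| is hypothetical and not part of this patch.

    // Sketch only: allocate when the format is known, then reuse per callback.
    virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE {
      input_bus_ = media::AudioBus::Create(params.channels(),
                                           params.frames_per_buffer());
      // ... existing FIFO / resampler setup ...
    }

    virtual void OnData(const int16* audio_data,
                        int sample_rate,
                        int number_of_channels,
                        int number_of_frames) OVERRIDE {
      DCHECK_EQ(input_bus_->channels(), number_of_channels);
      DCHECK_EQ(input_bus_->frames(), number_of_frames);
      // 2 == bytes per sample of the interleaved int16 input.
      input_bus_->FromInterleaved(audio_data, number_of_frames, 2);
      // ... resample and post to the render thread as in the patch ...
    }

    scoped_ptr<media::AudioBus> input_bus_;  // Hypothetical new member.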
@@ -204,27 +228,67 @@ class CastAudioSink : public base::SupportsWeakPtr<CastAudioSink>,
// TODO(hclam): We shouldn't hop through the render thread.
// Bounce the call from the real-time audio thread to the render thread.
// Needed since frame_input_ can be changed runtime by the render thread.
- media::AudioBus* const audio_bus_ptr = audio_bus.get();
+ media::AudioBus* const input_bus_ptr = input_bus.get();
render_thread_task_runner_->PostTask(
FROM_HERE,
base::Bind(&CastAudioSink::SendAudio,
weak_factory_.GetWeakPtr(),
- audio_bus_ptr,
+ input_bus_ptr,
base::TimeTicks::Now(),
- base::Bind(&DeleteAudioBus, base::Passed(&audio_bus))));
+ base::Bind(&DeleteAudioBus, base::Passed(&input_bus))));
+ }
+
+ // Returns resampled audio data from the input. This is called when the
+ // input sample rate doesn't match the output.
+ // The flow of data is as follows:
+ // |audio_data| ->
+ // AudioFifo |fifo_| ->
+ // MultiChannelResampler |resampler_|.
+ //
+ // The resampler pulls data out of the FIFO and resamples it. It might
+ // read from |fifo_| more than once, but no more than |kBufferAudioData|
+ // times. That's why we need to ensure |fifo_| has at least
+ // |kBufferAudioData| sets of audio data. Each set of audio data is
+ // 10ms worth.
+ scoped_ptr<media::AudioBus> ResampleData(
DaleCurtis 2014/03/07 01:14:03: Seems unnecessary to extract this to a new functio
Alpha Left Google 2014/03/07 01:34:01: It looks cleaner to me to have a separate method f
+ scoped_ptr<media::AudioBus> input_bus,
+ int input_sample_rate) {
+ // Make sure FIFO is completely full.
+ fifo_->Push(input_bus.get());
+ if (fifo_->frames() != fifo_->max_frames())
DaleCurtis 2014/03/07 01:14:03: I think you just want to make sure you don't respo
Alpha Left Google 2014/03/07 01:34:01: I changed it to a preroll.
+ return scoped_ptr<media::AudioBus>();
+
+ scoped_ptr<media::AudioBus> output_bus(
DaleCurtis 2014/03/07 01:14:03: Ditto. Just return a bool if there's no data avai
Alpha Left Google 2014/03/07 01:34:01: Not sure I understand the suggestion here.
+ media::AudioBus::Create(
+ output_channels_,
+ output_sample_rate_ * input_bus->frames() / input_sample_rate));
+
+ // The resampler will then call ProvideData() below to fetch data from
+ // |fifo_|.
+ resampler_->Resample(output_bus->frames(), output_bus.get());
+ return output_bus.Pass();
}
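A minimal sketch of the direction the comments above point to: preroll the FIFO instead of requiring it to be completely full, and report "no output yet" with a bool rather than a null AudioBus. Names and the threshold are illustrative only; the actual follow-up patch set may differ.

    // Sketch only: the caller allocates |output_bus| with
    // output_sample_rate_ * input_frames / input_sample_rate frames.
    bool ResampleData(const media::AudioBus* input_bus,
                      media::AudioBus* output_bus) {
      fifo_->Push(input_bus);
      // Preroll: wait until |fifo_| holds |kBufferAudioData| input buffers so
      // the resampler's ProvideData() reads cannot underflow it.
      if (fifo_->frames() < kBufferAudioData * input_bus->frames())
        return false;
      resampler_->Resample(output_bus->frames(), output_bus);
      return true;
    }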
- void SendAudio(const media::AudioBus* audio_bus_ptr,
+ void SendAudio(const media::AudioBus* audio_bus,
const base::TimeTicks& recorded_time,
const base::Closure& done_callback) {
DCHECK(render_thread_task_runner_->BelongsToCurrentThread());
DCHECK(frame_input_);
- frame_input_->InsertAudio(audio_bus_ptr, recorded_time, done_callback);
+ frame_input_->InsertAudio(audio_bus, recorded_time, done_callback);
}
// Called on real-time audio thread.
virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE {
- NOTIMPLEMENTED();
+ if (params.sample_rate() == output_sample_rate_)
DaleCurtis 2014/03/07 01:14:03: When and who calls this? Do you need to worry abou
Alpha Left Google 2014/03/07 01:34:01: This is called once before there audio data arrive
+ return;
+ fifo_.reset(new media::AudioFifo(
+ output_channels_,
+ kBufferAudioData * params.frames_per_buffer()));
+ resampler_.reset(new media::MultiChannelResampler(
+ output_channels_,
+ static_cast<double>(params.sample_rate()) / output_sample_rate_,
+ params.frames_per_buffer(),
+ base::Bind(&CastAudioSink::ProvideData, base::Unretained(this))));
}
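For concreteness, hypothetical numbers (not taken from the patch): MultiChannelResampler's second argument is the ratio of input to output sample rate, which is exactly what the params.sample_rate() / output_sample_rate_ expression above computes.

    // Example: 44.1 kHz capture resampled to a 48 kHz Opus clock rate.
    const int kInputRate = 44100;                       // params.sample_rate()
    const int kOutputRate = 48000;                      // output_sample_rate_
    const double kIoRatio =
        static_cast<double>(kInputRate) / kOutputRate;  // ~0.919, input / output.
    // Each 10 ms input buffer (441 frames) then produces roughly
    // 441 * kOutputRate / kInputRate = 480 output frames.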
// See CastVideoSink for details.
@@ -237,6 +301,10 @@ class CastAudioSink : public base::SupportsWeakPtr<CastAudioSink>,
}
}
+ void ProvideData(int frame_delay, media::AudioBus* output_bus) {
+ fifo_->Consume(output_bus, 0, output_bus->frames());
+ }
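A sketch (not part of the patch) of one way ProvideData() could tolerate a FIFO underflow by zero-filling the remainder; the patch instead relies on keeping |fifo_| full enough that AudioFifo::Consume() never asks for more frames than are buffered. std::min would need <algorithm>.

    // Sketch only: consume what is available and zero-fill the rest.
    void ProvideData(int frame_delay, media::AudioBus* output_bus) {
      const int available = std::min(fifo_->frames(), output_bus->frames());
      if (available > 0)
        fifo_->Consume(output_bus, 0, available);
      if (available < output_bus->frames())
        output_bus->ZeroFramesPartial(available,
                                      output_bus->frames() - available);
    }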
+
private:
blink::WebMediaStreamTrack track_;
scoped_refptr<media::cast::FrameInput> frame_input_;
@@ -245,6 +313,11 @@ class CastAudioSink : public base::SupportsWeakPtr<CastAudioSink>,
base::WeakPtrFactory<CastAudioSink> weak_factory_;
scoped_refptr<base::SingleThreadTaskRunner> render_thread_task_runner_;
+ scoped_ptr<media::MultiChannelResampler> resampler_;
+ scoped_ptr<media::AudioFifo> fifo_;
+ const int output_channels_;
+ const int output_sample_rate_;
+
DISALLOW_COPY_AND_ASSIGN(CastAudioSink);
};
@@ -309,12 +382,15 @@ void CastRtpStream::Start(const CastRtpParams& params,
DidEncounterError("Invalid parameters for audio.");
return;
}
+
// In case of error we have to go through DidEncounterError() to stop
// the streaming after reporting the error.
audio_sink_.reset(new CastAudioSink(
track_,
media::BindToCurrentLoop(base::Bind(&CastRtpStream::DidEncounterError,
- weak_factory_.GetWeakPtr()))));
+ weak_factory_.GetWeakPtr())),
+ params.payload.channels,
+ params.payload.clock_rate));
cast_session_->StartAudio(
config,
base::Bind(&CastAudioSink::AddToTrack,