| Index: content/renderer/media/media_stream_dependency_factory.cc
|
| diff --git a/content/renderer/media/media_stream_dependency_factory.cc b/content/renderer/media/media_stream_dependency_factory.cc
|
| index 31072a2149b68cdeb0b7153c56702710823ad30a..14cbf4387dd4919c1d15ff27ac8f23f81f8cf000 100644
|
| --- a/content/renderer/media/media_stream_dependency_factory.cc
|
| +++ b/content/renderer/media/media_stream_dependency_factory.cc
|
| @@ -190,6 +190,7 @@ void MediaStreamDependencyFactory::CreateNativeMediaSources(
|
| const WebKit::WebMediaConstraints& video_constraints,
|
| WebKit::WebMediaStreamDescriptor* description,
|
| const MediaSourcesCreatedCallback& sources_created) {
|
| + DVLOG(1) << "MediaStreamDependencyFactory::CreateNativeMediaSources()";
|
| if (!EnsurePeerConnectionFactory()) {
|
| sources_created.Run(description, false);
|
| return;
|
| @@ -200,8 +201,6 @@ void MediaStreamDependencyFactory::CreateNativeMediaSources(
|
| SourceStateObserver* source_observer =
|
| new SourceStateObserver(description, sources_created);
|
|
|
| - // TODO(perkj): Implement local audio sources.
|
| -
|
| // Create local video sources.
|
| RTCMediaConstraints native_video_constraints(video_constraints);
|
| WebKit::WebVector<WebKit::WebMediaStreamComponent> video_components;
|
| @@ -223,11 +222,37 @@ void MediaStreamDependencyFactory::CreateNativeMediaSources(
|
| &native_video_constraints));
|
| source_observer->AddSource(source_data->video_source());
|
| }
|
| +
|
| + // Do additional source initialization if the audio source is a valid
|
| + // microphone.
|
| + WebKit::WebVector<WebKit::WebMediaStreamComponent> audio_components;
|
| + description->audioSources(audio_components);
|
| + for (size_t i = 0; i < audio_components.size(); ++i) {
|
| + const WebKit::WebMediaStreamSource& source = audio_components[i].source();
|
| + MediaStreamSourceExtraData* source_data =
|
| + static_cast<MediaStreamSourceExtraData*>(source.extraData());
|
| + if (!source_data) {
|
| + // TODO(henrika): Implement support for sources from remote MediaStreams.
|
| + NOTIMPLEMENTED();
|
| + continue;
|
| + }
|
| +
|
| + const StreamDeviceInfo device_info = source_data->device_info();
|
| + if (device_info.device.type == content::MEDIA_DEVICE_AUDIO_CAPTURE) {
|
| + if (!InitializeAudioSource(device_info)) {
|
| + DLOG(WARNING) << "Unsupported audio source";
|
| + sources_created.Run(description, false);
|
| + return;
|
| + }
|
| + }
|
| + }
|
| +
|
| source_observer->StartObservering();
|
| }
|
|
|
| void MediaStreamDependencyFactory::CreateNativeLocalMediaStream(
|
| WebKit::WebMediaStreamDescriptor* description) {
|
| + DVLOG(1) << "MediaStreamDependencyFactory::CreateNativeLocalMediaStream()";
|
| DCHECK(PeerConnectionFactoryCreated());
|
|
|
| std::string label = UTF16ToUTF8(description->label());
|
| @@ -253,10 +278,6 @@ void MediaStreamDependencyFactory::CreateNativeLocalMediaStream(
|
| CreateLocalAudioTrack(UTF16ToUTF8(source.id()), NULL));
|
| native_stream->AddTrack(audio_track);
|
| audio_track->set_enabled(audio_components[i].isEnabled());
|
| - // TODO(xians): This set the source of all audio tracks to the same
|
| - // microphone. Implement support for setting the source per audio track
|
| - // instead.
|
| - SetAudioDeviceSessionId(source_data->device_info().session_id);
|
| }
|
|
|
| // Add video tracks.
|
| @@ -295,6 +316,7 @@ void MediaStreamDependencyFactory::CreateNativeLocalMediaStream(
|
| }
|
|
|
| bool MediaStreamDependencyFactory::CreatePeerConnectionFactory() {
|
| + DVLOG(1) << "MediaStreamDependencyFactory::CreatePeerConnectionFactory()";
|
| if (!pc_factory_.get()) {
|
| DCHECK(!audio_device_);
|
| audio_device_ = new WebRtcAudioDeviceImpl();
|
| @@ -352,6 +374,32 @@ MediaStreamDependencyFactory::CreateVideoSource(
|
| return source;
|
| }
|
|
|
| +bool MediaStreamDependencyFactory::InitializeAudioSource(
|
| + const StreamDeviceInfo& device_info) {
|
| + DVLOG(1) << "MediaStreamDependencyFactory::InitializeAudioSource()";
|
| + const MediaStreamDevice device = device_info.device;
|
| +
|
| + // Initialize the source using audio parameters for the selected
|
| + // capture device.
|
| + WebRtcAudioCapturer* capturer = GetWebRtcAudioDevice()->capturer();
|
| + // TODO(henrika): refactor content/public/common/media_stream_request.h
|
| + // to allow dependency of media::ChannelLayout and avoid static_cast.
|
| + if (!capturer->Initialize(
|
| + static_cast<media::ChannelLayout>(device.channel_layout),
|
| + device.sample_rate))
|
| + return false;
|
| +
|
| + // Specify which capture device to use. The acquired session id is used
|
| + // for identification.
|
| + // TODO(henrika): the current design does not support a unique source
|
| + // for each audio track.
|
| + if (device_info.session_id <= 0)
|
| + return false;
|
| +
|
| + capturer->SetDevice(device_info.session_id);
|
| + return true;
|
| +}
|
| +
|
| scoped_refptr<webrtc::VideoTrackInterface>
|
| MediaStreamDependencyFactory::CreateLocalVideoTrack(
|
| const std::string& label,
|
| @@ -384,10 +432,6 @@ MediaStreamDependencyFactory::GetWebRtcAudioDevice() {
|
| return audio_device_;
|
| }
|
|
|
| -void MediaStreamDependencyFactory::SetAudioDeviceSessionId(int session_id) {
|
| - audio_device_->SetSessionId(session_id);
|
| -}
|
| -
|
| void MediaStreamDependencyFactory::InitializeWorkerThread(
|
| talk_base::Thread** thread,
|
| base::WaitableEvent* event) {
|
|
|