Chromium Code Reviews| Index: content/renderer/speech_recognition_dispatcher.cc |
| diff --git a/content/renderer/speech_recognition_dispatcher.cc b/content/renderer/speech_recognition_dispatcher.cc |
| index 178abf4610c286c751047ff46ec252089f2946bb..3aa0234d644f4dc39b1417d243f3eb71b66d7624 100644 |
| --- a/content/renderer/speech_recognition_dispatcher.cc |
| +++ b/content/renderer/speech_recognition_dispatcher.cc |
| @@ -8,6 +8,7 @@ |
| #include "base/strings/utf_string_conversions.h" |
| #include "content/common/speech_recognition_messages.h" |
| #include "content/renderer/render_view_impl.h" |
| +#include "content/renderer/speech_recognition_audio_source_provider.h" |
| #include "third_party/WebKit/public/platform/WebString.h" |
| #include "third_party/WebKit/public/platform/WebVector.h" |
| #include "third_party/WebKit/public/web/WebSpeechGrammar.h" |
| @@ -29,6 +30,9 @@ SpeechRecognitionDispatcher::SpeechRecognitionDispatcher( |
| RenderViewImpl* render_view) |
| : RenderViewObserver(render_view), |
| recognizer_client_(NULL), |
| + audio_track_set_(false), |
| + is_allowed_audio_track_(false), |
| + render_loop_(base::MessageLoopProxy::current()), |
| next_id_(1) { |
| } |
| @@ -36,6 +40,7 @@ SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() { |
| } |
| void SpeechRecognitionDispatcher::AbortAllRecognitions() { |
| + audio_source_provider_.reset(); |
| Send(new SpeechRecognitionHostMsg_AbortAllRequests( |
| routing_id())); |
| } |
| @@ -53,11 +58,31 @@ bool SpeechRecognitionDispatcher::OnMessageReceived( |
| IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded) |
| IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved, |
| OnResultsRetrieved) |
| + IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioTrackReady, |
| + OnAudioTrackReady) |
| IPC_MESSAGE_UNHANDLED(handled = false) |
| IPC_END_MESSAGE_MAP() |
| return handled; |
| } |
| +void SpeechRecognitionDispatcher::attach( |
| + const blink::WebSpeechRecognitionHandle& handle, |
| + const blink::WebMediaStreamTrack& audio_track, |
| + blink::WebSpeechRecognizerClient* recognizer_client) { |
| + |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
remove this empty line
burnik
2014/08/29 13:26:17
Done.
|
| + // Check if track is from an allowed source (microphone only for now) |
| + is_allowed_audio_track_ = |
| + SpeechRecognitionAudioSourceProvider::IsAllowedAudioTrack(audio_track); |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
would it make sense to have the IsAllowedAudioTrack… [comment truncated in capture]
burnik
2014/08/29 13:26:18
My intention is to move away as much logic as possible… [comment truncated in capture]
|
| + audio_track_ = audio_track; |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
first DCHECK that audio_track_ isn't valid?
burnik
2014/08/29 13:26:17
Method needs refactoring.
On 2014/08/29 11:25:31,
|
| + audio_track_set_ = true; |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
Should we DCHECK(!audio_track_set_); at the top of… [comment truncated in capture]
|
| +} |
| + |
| +void SpeechRecognitionDispatcher::detach( |
| + const blink::WebSpeechRecognitionHandle& handle, |
| + blink::WebSpeechRecognizerClient* recognizer_client) { |
| + audio_track_set_ = false; |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
reset/clear audio_track_ as well and set is_allowed_audio_track_… [comment truncated in capture]
burnik
2014/08/29 13:26:17
Method needs refactoring.
On 2014/08/29 11:25:31,
|
| +} |
| + |
| void SpeechRecognitionDispatcher::start( |
| const WebSpeechRecognitionHandle& handle, |
| const WebSpeechRecognitionParams& params, |
| @@ -65,6 +90,17 @@ void SpeechRecognitionDispatcher::start( |
| DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client); |
| recognizer_client_ = recognizer_client; |
| + // destroy any previous instance not to starve it waiting on chunk ACKs |
|
no longer working on chromium
2014/08/29 12:23:07
Destroy
burnik
2014/08/29 13:26:17
Done.
|
| + audio_source_provider_.reset(); |
| + |
| + if (audio_track_set_ && !is_allowed_audio_track_) { |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
do you really need is_allowed_audio_track_? If th
burnik
2014/08/29 13:26:18
The JS |webkitSpeechRecognition.audioTrack| is not… [comment truncated in capture]
|
| + // notify user that the track used is not supported |
| + recognizer_client_->didReceiveError( |
| + handle, |
| + WebString("Provided audioTrack is not supported. Ignoring track."), |
| + WebSpeechRecognizerClient::NotAllowedError); |
|
no longer working on chromium
2014/08/29 12:23:07
probably you should fail the start call in such a case… [comment truncated in capture]
burnik
2014/08/29 13:26:17
Good for discussion.
On 2014/08/29 12:23:07, xians
|
| + } |
| + |
| SpeechRecognitionHostMsg_StartRequest_Params msg_params; |
| for (size_t i = 0; i < params.grammars().size(); ++i) { |
| const WebSpeechGrammar& grammar = params.grammars()[i]; |
| @@ -78,6 +114,9 @@ void SpeechRecognitionDispatcher::start( |
| msg_params.origin_url = params.origin().toString().utf8(); |
| msg_params.render_view_id = routing_id(); |
| msg_params.request_id = GetOrCreateIDForHandle(handle); |
| + // fall back to default input when the track is not allowed |
| + msg_params.using_audio_track = (audio_track_set_ && is_allowed_audio_track_); |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
looks like you always test these variables together… [comment truncated in capture]
burnik
2014/08/29 13:26:18
Same as comment above. The JS API behaviour is not… [comment truncated in capture]
|
| + msg_params.peer_process_handle = base::GetCurrentProcessHandle(); |
| // The handle mapping will be removed in |OnRecognitionEnd|. |
| Send(new SpeechRecognitionHostMsg_StartRequest(msg_params)); |
| } |
| @@ -85,6 +124,7 @@ void SpeechRecognitionDispatcher::start( |
| void SpeechRecognitionDispatcher::stop( |
| const WebSpeechRecognitionHandle& handle, |
| WebSpeechRecognizerClient* recognizer_client) { |
| + audio_source_provider_.reset(); |
| // Ignore a |stop| issued without a matching |start|. |
| if (recognizer_client_ != recognizer_client || !HandleExists(handle)) |
| return; |
| @@ -95,6 +135,7 @@ void SpeechRecognitionDispatcher::stop( |
| void SpeechRecognitionDispatcher::abort( |
| const WebSpeechRecognitionHandle& handle, |
| WebSpeechRecognizerClient* recognizer_client) { |
| + audio_source_provider_.reset(); |
| // Ignore an |abort| issued without a matching |start|. |
| if (recognizer_client_ != recognizer_client || !HandleExists(handle)) |
| return; |
| @@ -154,6 +195,7 @@ void SpeechRecognitionDispatcher::OnErrorOccurred( |
| recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id), |
| WebSpeechRecognitionResult()); |
| } else { |
| + audio_source_provider_.reset(); |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
I'm assuming that the browser side will be aware of… [comment truncated in capture]
burnik
2014/08/29 13:26:18
This message is received from the browser. Sockets… [comment truncated in capture]
|
| recognizer_client_->didReceiveError( |
| GetHandleFromID(request_id), |
| WebString(), // TODO(primiano): message? |
| @@ -174,6 +216,7 @@ void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) { |
| // didEnd may call back synchronously to start a new recognition session, |
| // and we don't want to delete the handle from the map after that happens. |
| handle_map_.erase(request_id); |
| + audio_source_provider_.reset(); |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
same question here since this feels functionally c
burnik
2014/08/29 13:26:17
This effectively kills off the sockets so the audio… [comment truncated in capture]
|
| recognizer_client_->didEnd(handle); |
| } |
| } |
| @@ -211,6 +254,20 @@ void SpeechRecognitionDispatcher::OnResultsRetrieved( |
| GetHandleFromID(request_id), final, provisional); |
| } |
| +void SpeechRecognitionDispatcher::OnAudioTrackReady( |
| + int request_id, |
| + const media::AudioParameters& params, |
| + base::SharedMemoryHandle memory, |
| + base::NativeSyncSocket::Descriptor socket, |
| + uint32 length) { |
| + // TODO(burnik): Log and DCHECK(!audio_source_provider_). |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
can you do this now?
burnik
2014/08/29 13:26:18
It's still a bit fuzzy because we can only have one… [comment truncated in capture]
|
| + if (audio_track_.isNull()) { |
| + audio_source_provider_.reset(); |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
should this be done in detach() also?
burnik
2014/08/29 13:26:17
I would have to dig in deeper to check if this would… [comment truncated in capture]
|
| + return; |
| + } |
| + audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider( |
| + audio_track_, params, memory, socket, length)); |
|
tommi (sloooow) - chröme
2014/08/29 11:25:31
indent
burnik
2014/08/29 13:26:17
Done.
|
| +} |
| int SpeechRecognitionDispatcher::GetOrCreateIDForHandle( |
| const WebSpeechRecognitionHandle& handle) { |