Chromium Code Reviews| Index: content/renderer/speech_recognition_dispatcher.h |
| diff --git a/content/renderer/speech_recognition_dispatcher.h b/content/renderer/speech_recognition_dispatcher.h |
| index bae0e5d92a484cbd706533010a8ffd28e3f9e5fa..9ac43f55ed6edbfeee9b5e6c635140fecab99d1a 100644 |
| --- a/content/renderer/speech_recognition_dispatcher.h |
| +++ b/content/renderer/speech_recognition_dispatcher.h |
| @@ -8,14 +8,23 @@ |
| #include <map> |
| #include "base/basictypes.h" |
| +#include "base/memory/shared_memory.h" |
| +#include "base/message_loop/message_loop_proxy.h" |
| #include "content/public/common/speech_recognition_result.h" |
| #include "content/public/renderer/render_view_observer.h" |
| +#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h" |
| #include "third_party/WebKit/public/platform/WebVector.h" |
| #include "third_party/WebKit/public/web/WebSpeechRecognitionHandle.h" |
| #include "third_party/WebKit/public/web/WebSpeechRecognizer.h" |
| + |
| +namespace media { |
| +class AudioParameters; |
| +} |
| + |
| namespace content { |
| class RenderViewImpl; |
| +class SpeechRecognitionAudioSourceProvider; |
| struct SpeechRecognitionError; |
| struct SpeechRecognitionResult; |
| @@ -36,6 +45,11 @@ class SpeechRecognitionDispatcher : public RenderViewObserver, |
| virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE; |
| // blink::WebSpeechRecognizer implementation. |
| + virtual void attach(const blink::WebSpeechRecognitionHandle&, |
| + const blink::WebMediaStreamTrack&, |
| + blink::WebSpeechRecognizerClient*) OVERRIDE; |
| + virtual void detach(const blink::WebSpeechRecognitionHandle&, |
| + blink::WebSpeechRecognizerClient*) OVERRIDE; |
| virtual void start(const blink::WebSpeechRecognitionHandle&, |
| const blink::WebSpeechRecognitionParams&, |
| blink::WebSpeechRecognizerClient*) OVERRIDE; |
| @@ -53,6 +67,13 @@ class SpeechRecognitionDispatcher : public RenderViewObserver, |
| void OnRecognitionEnded(int request_id); |
| void OnResultsRetrieved(int request_id, |
| const SpeechRecognitionResults& result); |
| + void OnAudioTrackReady(int session_id, const media::AudioParameters& params, |
| + base::SharedMemoryHandle handle, uint32 length); |
| + |
| + // For receiving audio data via the audio track. |
|
henrika (OOO until Aug 14)
2014/08/25 14:46:00
nit: capital 'F', and end with a period '.', as with all other comments.
|
| + void OnAudioData(int request_id); |
| + void OnAudioError(int request_id); |
| + void OnAudioChunkProcessed(int request_id); |
| int GetOrCreateIDForHandle(const blink::WebSpeechRecognitionHandle& handle); |
| bool HandleExists(const blink::WebSpeechRecognitionHandle& handle); |
| @@ -61,6 +82,21 @@ class SpeechRecognitionDispatcher : public RenderViewObserver, |
| // The WebKit client class that we use to send events back to the JS world. |
| blink::WebSpeechRecognizerClient* recognizer_client_; |
| + // Media stream audio track that the speech recognition connects to. |
| + // Accessed on the render thread. |
| + blink::WebMediaStreamTrack audio_track_; |
| + |
| + bool audio_track_set_; |
|
henrika (OOO until Aug 14)
2014/08/25 14:46:00
Comment.
...and use capitals for all comments below.
|
| + |
| + // True if the attached audio track is an allowed audio source under policy. |
|
henrika (OOO until Aug 14)
2014/08/25 14:46:00
Rather unclear comment to me. What is meant by "used by policy"?
|
| + bool is_allowed_audio_track_; |
| + |
| + // Audio sink used to provide audio data from the track. |
| + scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_; |
| + |
| + // For sending signals back to the browser. |
| + scoped_refptr<base::MessageLoopProxy> render_loop_; |
| + |
| typedef std::map<int, blink::WebSpeechRecognitionHandle> HandleMap; |
| HandleMap handle_map_; |
| int next_id_; |