Index: content/renderer/speech_recognition_dispatcher.h
diff --git a/content/renderer/speech_recognition_dispatcher.h b/content/renderer/speech_recognition_dispatcher.h
index bae0e5d92a484cbd706533010a8ffd28e3f9e5fa..41d477f14ccca80c2514804077093d70ac976ee0 100644
--- a/content/renderer/speech_recognition_dispatcher.h
+++ b/content/renderer/speech_recognition_dispatcher.h
@@ -8,14 +8,24 @@
 #include <map>
 
 #include "base/basictypes.h"
+#include "base/memory/shared_memory.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/sync_socket.h"
 #include "content/public/common/speech_recognition_result.h"
 #include "content/public/renderer/render_view_observer.h"
+#include "content/renderer/speech_recognition_audio_source_provider.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
 #include "third_party/WebKit/public/platform/WebVector.h"
 #include "third_party/WebKit/public/web/WebSpeechRecognitionHandle.h"
 #include "third_party/WebKit/public/web/WebSpeechRecognizer.h"
 
+namespace media {
+class AudioParameters;
+}
+
 namespace content {
 class RenderViewImpl;
+class SpeechRecognitionAudioSourceProvider;
 
 struct SpeechRecognitionError;
 struct SpeechRecognitionResult;
@@ -36,6 +46,11 @@ class SpeechRecognitionDispatcher : public RenderViewObserver,
   virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
 
   // blink::WebSpeechRecognizer implementation.
+  virtual void attach(const blink::WebSpeechRecognitionHandle&,
+                      const blink::WebMediaStreamTrack&,
+                      blink::WebSpeechRecognizerClient*) /*OVERRIDE*/;
+  virtual void detach(const blink::WebSpeechRecognitionHandle&,
+                      blink::WebSpeechRecognizerClient*) /*OVERRIDE*/;
   virtual void start(const blink::WebSpeechRecognitionHandle&,
                      const blink::WebSpeechRecognitionParams&,
                      blink::WebSpeechRecognizerClient*) OVERRIDE;
@@ -53,6 +68,11 @@ class SpeechRecognitionDispatcher : public RenderViewObserver,
   void OnRecognitionEnded(int request_id);
   void OnResultsRetrieved(int request_id,
                           const SpeechRecognitionResults& result);
+  void OnAudioTrackReady(int session_id, const media::AudioParameters& params,
+                         base::SharedMemoryHandle handle,
+                         base::SyncSocket::TransitDescriptor socket);
+  virtual void OnAudioTrackError(
+      SpeechRecognitionAudioSourceProvider::ErrorState error);
 
   int GetOrCreateIDForHandle(const blink::WebSpeechRecognitionHandle& handle);
   bool HandleExists(const blink::WebSpeechRecognitionHandle& handle);
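
(Aside, not part of the patch: a minimal sketch of how the .cc side might consume
OnAudioTrackReady, wiring the browser-provided shared memory and sync socket into
the audio source provider. The SpeechRecognitionAudioSourceProvider constructor
arguments, the use of base::SyncSocket::UnwrapHandle(), and the error-callback
wiring are assumptions for illustration, not signatures taken from this change.)

// Hypothetical sketch only; constructor arguments are assumed.
void SpeechRecognitionDispatcher::OnAudioTrackReady(
    int session_id,
    const media::AudioParameters& params,
    base::SharedMemoryHandle handle,
    base::SyncSocket::TransitDescriptor socket) {
  // At most one provider should exist per ready notification.
  DCHECK(!audio_source_provider_.get());

  // Without an attached and allowed track there is nothing to capture from.
  if (!audio_track_set_ || !is_allowed_audio_track_)
    return;

  audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider(
      audio_track_, params, handle,
      base::SyncSocket::UnwrapHandle(socket),
      base::Bind(&SpeechRecognitionDispatcher::OnAudioTrackError,
                 base::Unretained(this))));
}
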
@@ -61,6 +81,19 @@ class SpeechRecognitionDispatcher : public RenderViewObserver,
   // The WebKit client class that we use to send events back to the JS world.
   blink::WebSpeechRecognizerClient* recognizer_client_;
 
+  // Media stream audio track that speech recognition is bound to.
+  // Accessed on the render thread.
+  blink::WebMediaStreamTrack audio_track_;
+
+  // Whether an audio track has been attached for this session.
+  bool audio_track_set_;
+
+  // Whether the attached audio track is an allowed source under policy.
+  bool is_allowed_audio_track_;
+
+  // Audio sink connected to the track, providing its audio for recognition.
+  scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_;
+
   typedef std::map<int, blink::WebSpeechRecognitionHandle> HandleMap;
   HandleMap handle_map_;
   int next_id_;
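
(Aside, not part of the patch: a similarly hedged sketch of how attach()/detach()
might keep the new members consistent. The policy decision behind
is_allowed_audio_track_ is out of scope here and simply assumed to pass.)

// Hypothetical sketch; not the implementation from this change.
void SpeechRecognitionDispatcher::attach(
    const blink::WebSpeechRecognitionHandle& handle,
    const blink::WebMediaStreamTrack& audio_track,
    blink::WebSpeechRecognizerClient* recognizer_client) {
  // Remember the track so a subsequent start() can capture from it.
  audio_track_ = audio_track;
  audio_track_set_ = !audio_track_.isNull();
  // Whether this source is acceptable is a policy decision made elsewhere;
  // the sketch assumes it is.
  is_allowed_audio_track_ = audio_track_set_;
}

void SpeechRecognitionDispatcher::detach(
    const blink::WebSpeechRecognitionHandle& handle,
    blink::WebSpeechRecognizerClient* recognizer_client) {
  // Drop the track and tear down any sink bound to it.
  audio_track_set_ = false;
  audio_track_.reset();
  audio_source_provider_.reset();
}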