Index: content/renderer/speech_recognition_dispatcher.h
diff --git a/content/renderer/speech_recognition_dispatcher.h b/content/renderer/speech_recognition_dispatcher.h
index bae0e5d92a484cbd706533010a8ffd28e3f9e5fa..553e8ecbb4b26a45826f13e73068a555879f8069 100644
--- a/content/renderer/speech_recognition_dispatcher.h
+++ b/content/renderer/speech_recognition_dispatcher.h
@@ -8,14 +8,23 @@
 #include <map>
 
 #include "base/basictypes.h"
+#include "base/memory/shared_memory.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/native_sync_socket.h"
 #include "content/public/common/speech_recognition_result.h"
 #include "content/public/renderer/render_view_observer.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
 #include "third_party/WebKit/public/platform/WebVector.h"
 #include "third_party/WebKit/public/web/WebSpeechRecognitionHandle.h"
 #include "third_party/WebKit/public/web/WebSpeechRecognizer.h"
 
+namespace media {
+class AudioParameters;
+}
+
 namespace content {
 
 class RenderViewImpl;
+class SpeechRecognitionAudioSourceProvider;
 struct SpeechRecognitionError;
 struct SpeechRecognitionResult;
@@ -36,6 +45,11 @@ class SpeechRecognitionDispatcher : public RenderViewObserver,
   virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
 
   // blink::WebSpeechRecognizer implementation.
+  virtual void attach(const blink::WebSpeechRecognitionHandle&,
+                      const blink::WebMediaStreamTrack&,
tommi (sloooow) - chröme  2014/08/29 11:25:31
  fix indent here and below

burnik  2014/08/29 13:26:18
  Done.
+                      blink::WebSpeechRecognizerClient*) OVERRIDE;
+  virtual void detach(const blink::WebSpeechRecognitionHandle&,
+                      blink::WebSpeechRecognizerClient*) OVERRIDE;
   virtual void start(const blink::WebSpeechRecognitionHandle&,
                      const blink::WebSpeechRecognitionParams&,
                      blink::WebSpeechRecognizerClient*) OVERRIDE;
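
For context on the two new WebSpeechRecognizer methods: attach() binds a MediaStreamTrack to a recognition session before start() is called, and detach() releases it. A minimal sketch of what the renderer-side implementation could look like, assuming a type check through WebMediaStreamSource and the member updates shown in this patch (illustrative only, not the actual change):

void SpeechRecognitionDispatcher::attach(
    const blink::WebSpeechRecognitionHandle& handle,
    const blink::WebMediaStreamTrack& audio_track,
    blink::WebSpeechRecognizerClient* recognizer_client) {
  // Only an audio track can serve as a recognition source.
  if (audio_track.source().type() != blink::WebMediaStreamSource::TypeAudio)
    return;
  audio_track_ = audio_track;
  audio_track_set_ = true;
}

void SpeechRecognitionDispatcher::detach(
    const blink::WebSpeechRecognitionHandle& handle,
    blink::WebSpeechRecognizerClient* recognizer_client) {
  // Drop the bound track; a later start() would fall back to the
  // default audio capture path.
  audio_track_.reset();
  audio_track_set_ = false;
}
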
@@ -53,6 +67,11 @@ class SpeechRecognitionDispatcher : public RenderViewObserver,
   void OnRecognitionEnded(int request_id);
   void OnResultsRetrieved(int request_id,
                           const SpeechRecognitionResults& result);
+  void OnAudioTrackReady(int session_id,
+                         const media::AudioParameters& params,
+                         base::SharedMemoryHandle handle,
+                         base::NativeSyncSocket::Descriptor socket,
+                         uint32 length);
 
   int GetOrCreateIDForHandle(const blink::WebSpeechRecognitionHandle& handle);
   bool HandleExists(const blink::WebSpeechRecognitionHandle& handle);
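
On the new IPC handler: the browser hands the renderer a shared memory region and a sync socket over which captured audio is transported. A hedged sketch of how the handler could wire these into the new provider member; the SpeechRecognitionAudioSourceProvider constructor signature is an assumption, and base::NativeSyncSocket is assumed to behave like base::SyncSocket:

void SpeechRecognitionDispatcher::OnAudioTrackReady(
    int session_id,
    const media::AudioParameters& params,
    base::SharedMemoryHandle handle,
    base::NativeSyncSocket::Descriptor socket,
    uint32 length) {
  DCHECK(audio_track_set_);
  // The provider acts as a sink on |audio_track_|: it writes captured
  // audio into the shared memory buffer and signals the browser over
  // the sync socket after each |length|-byte chunk.
  audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider(
      audio_track_, params, handle, socket, length));
}
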
@@ -61,6 +80,21 @@ class SpeechRecognitionDispatcher : public RenderViewObserver,
   // The WebKit client class that we use to send events back to the JS world.
   blink::WebSpeechRecognizerClient* recognizer_client_;
 
+  // Media stream audio track that the speech recognition connects to.
+  // Accessed on the render thread.
+  blink::WebMediaStreamTrack audio_track_;
+
+  bool audio_track_set_;
+
+  // Whether the attached track is an allowed audio source under policy.
+  bool is_allowed_audio_track_;
+
+  // Audio sink used to provide audio from the track.
+  scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_;
+
+  // For sending messages back to the browser process.
+  scoped_refptr<base::MessageLoopProxy> render_loop_;
no longer working on chromium  2014/08/29 12:23:07
  why do you need this?

burnik  2014/08/29 13:26:18
  Don't need it anymore.
+
   typedef std::map<int, blink::WebSpeechRecognitionHandle> HandleMap;
   HandleMap handle_map_;
   int next_id_;
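
For orientation, the new members sit next to the dispatcher's existing handle bookkeeping: browser-side sessions are keyed by integer IDs, and GetOrCreateIDForHandle() maps Blink handles onto them. Roughly, a sketch inferred from the surrounding declarations (not code in this patch; the equals() comparison on the handle is an assumption):

int SpeechRecognitionDispatcher::GetOrCreateIDForHandle(
    const blink::WebSpeechRecognitionHandle& handle) {
  // Reuse an existing ID if this handle has been seen before.
  for (HandleMap::iterator iter = handle_map_.begin();
       iter != handle_map_.end(); ++iter) {
    if (iter->second.equals(handle))
      return iter->first;
  }
  // Otherwise allocate a fresh ID so browser-to-renderer messages can
  // be routed back to the right recognition handle.
  ++next_id_;
  handle_map_[next_id_] = handle;
  return next_id_;
}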