| Index: content/renderer/speech_recognition_dispatcher.h
|
| diff --git a/content/renderer/speech_recognition_dispatcher.h b/content/renderer/speech_recognition_dispatcher.h
|
| index bae0e5d92a484cbd706533010a8ffd28e3f9e5fa..d17b95640ed0213e9f2ae3a3139dd554504b8194 100644
|
| --- a/content/renderer/speech_recognition_dispatcher.h
|
| +++ b/content/renderer/speech_recognition_dispatcher.h
|
| @@ -8,14 +8,24 @@
|
| #include <map>
|
|
|
| #include "base/basictypes.h"
|
| +#include "base/memory/shared_memory.h"
|
| +#include "base/message_loop/message_loop_proxy.h"
|
| +#include "base/sync_socket.h"
|
| #include "content/public/common/speech_recognition_result.h"
|
| #include "content/public/renderer/render_view_observer.h"
|
| +#include "content/renderer/media/speech_recognition_audio_source_provider.h"
|
| +#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
|
| #include "third_party/WebKit/public/platform/WebVector.h"
|
| #include "third_party/WebKit/public/web/WebSpeechRecognitionHandle.h"
|
| #include "third_party/WebKit/public/web/WebSpeechRecognizer.h"
|
|
|
| +namespace media {
|
| +class AudioParameters;
|
| +}
|
| +
|
| namespace content {
|
| class RenderViewImpl;
|
| +class SpeechRecognitionAudioSourceProvider;
|
| struct SpeechRecognitionError;
|
| struct SpeechRecognitionResult;
|
|
|
| @@ -39,6 +49,10 @@ class SpeechRecognitionDispatcher : public RenderViewObserver,
|
| virtual void start(const blink::WebSpeechRecognitionHandle&,
|
| const blink::WebSpeechRecognitionParams&,
|
| blink::WebSpeechRecognizerClient*) OVERRIDE;
|
| + virtual void start(const blink::WebSpeechRecognitionHandle&,
|
| + const blink::WebSpeechRecognitionParams&,
|
| + const blink::WebMediaStreamTrack&,
|
| + blink::WebSpeechRecognizerClient*) OVERRIDE;
|
| virtual void stop(const blink::WebSpeechRecognitionHandle&,
|
| blink::WebSpeechRecognizerClient*) OVERRIDE;
|
| virtual void abort(const blink::WebSpeechRecognitionHandle&,
|
| @@ -53,6 +67,10 @@ class SpeechRecognitionDispatcher : public RenderViewObserver,
|
| void OnRecognitionEnded(int request_id);
|
| void OnResultsRetrieved(int request_id,
|
| const SpeechRecognitionResults& result);
|
| + void OnAudioTrackReady(int session_id, const media::AudioParameters& params,
|
| + base::SharedMemoryHandle handle,
|
| + base::SyncSocket::TransitDescriptor socket);
|
| + virtual void OnAudioTrackStopped();
|
|
|
| int GetOrCreateIDForHandle(const blink::WebSpeechRecognitionHandle& handle);
|
| bool HandleExists(const blink::WebSpeechRecognitionHandle& handle);
|
| @@ -61,6 +79,13 @@ class SpeechRecognitionDispatcher : public RenderViewObserver,
|
| // The WebKit client class that we use to send events back to the JS world.
|
| blink::WebSpeechRecognizerClient* recognizer_client_;
|
|
|
| + // Media stream audio track that the speech recognizer connects to.
|
| + // Accessed on the render thread.
|
| + blink::WebMediaStreamTrack audio_track_;
|
| +
|
| + // Audio sink that captures audio data from |audio_track_| for recognition.
|
| + scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_;
|
| +
|
| typedef std::map<int, blink::WebSpeechRecognitionHandle> HandleMap;
|
| HandleMap handle_map_;
|
| int next_id_;
|
|
|