OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ | 5 #ifndef CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ |
6 #define CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ | 6 #define CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ |
7 | 7 |
8 #include <map> | 8 #include <map> |
9 | 9 |
10 #include "base/basictypes.h" | 10 #include "base/basictypes.h" |
| 11 #include "base/memory/shared_memory.h" |
| 12 #include "base/message_loop/message_loop_proxy.h" |
| 13 #include "base/sync_socket.h" |
11 #include "content/public/common/speech_recognition_result.h" | 14 #include "content/public/common/speech_recognition_result.h" |
12 #include "content/public/renderer/render_view_observer.h" | 15 #include "content/public/renderer/render_view_observer.h" |
| 16 #include "content/renderer/speech_recognition_audio_source_provider.h" |
| 17 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h" |
13 #include "third_party/WebKit/public/platform/WebVector.h" | 18 #include "third_party/WebKit/public/platform/WebVector.h" |
14 #include "third_party/WebKit/public/web/WebSpeechRecognitionHandle.h" | 19 #include "third_party/WebKit/public/web/WebSpeechRecognitionHandle.h" |
15 #include "third_party/WebKit/public/web/WebSpeechRecognizer.h" | 20 #include "third_party/WebKit/public/web/WebSpeechRecognizer.h" |
16 | 21 |
| 22 namespace media { |
| 23 class AudioParameters; |
| 24 } |
| 25 |
17 namespace content { | 26 namespace content { |
18 class RenderViewImpl; | 27 class RenderViewImpl; |
| 28 class SpeechRecognitionAudioSourceProvider; |
19 struct SpeechRecognitionError; | 29 struct SpeechRecognitionError; |
20 struct SpeechRecognitionResult; | 30 struct SpeechRecognitionResult; |
21 | 31 |
22 // SpeechRecognitionDispatcher is a delegate for methods used by WebKit for | 32 // SpeechRecognitionDispatcher is a delegate for methods used by WebKit for |
23 // scripted JS speech APIs. It's the complement of | 33 // scripted JS speech APIs. It's the complement of |
24 // SpeechRecognitionDispatcherHost (owned by RenderViewHost). | 34 // SpeechRecognitionDispatcherHost (owned by RenderViewHost). |
25 class SpeechRecognitionDispatcher : public RenderViewObserver, | 35 class SpeechRecognitionDispatcher : public RenderViewObserver, |
26 public blink::WebSpeechRecognizer { | 36 public blink::WebSpeechRecognizer { |
27 public: | 37 public: |
28 explicit SpeechRecognitionDispatcher(RenderViewImpl* render_view); | 38 explicit SpeechRecognitionDispatcher(RenderViewImpl* render_view); |
29 virtual ~SpeechRecognitionDispatcher(); | 39 virtual ~SpeechRecognitionDispatcher(); |
30 | 40 |
31 // Aborts all speech recognitions. | 41 // Aborts all speech recognitions. |
32 void AbortAllRecognitions(); | 42 void AbortAllRecognitions(); |
33 | 43 |
34 private: | 44 private: |
35 // RenderViewObserver implementation. | 45 // RenderViewObserver implementation. |
36 virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE; | 46 virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE; |
37 | 47 |
38 // blink::WebSpeechRecognizer implementation. | 48 // blink::WebSpeechRecognizer implementation. |
39 virtual void start(const blink::WebSpeechRecognitionHandle&, | 49 virtual void start(const blink::WebSpeechRecognitionHandle&, |
40 const blink::WebSpeechRecognitionParams&, | 50 const blink::WebSpeechRecognitionParams&, |
41 blink::WebSpeechRecognizerClient*) OVERRIDE; | 51 blink::WebSpeechRecognizerClient*) OVERRIDE; |
| 52 virtual void start(const blink::WebSpeechRecognitionHandle&, |
| 53 const blink::WebSpeechRecognitionParams&, |
| 54 const blink::WebMediaStreamTrack&, |
| 55 blink::WebSpeechRecognizerClient*) OVERRIDE; |
42 virtual void stop(const blink::WebSpeechRecognitionHandle&, | 56 virtual void stop(const blink::WebSpeechRecognitionHandle&, |
43 blink::WebSpeechRecognizerClient*) OVERRIDE; | 57 blink::WebSpeechRecognizerClient*) OVERRIDE; |
44 virtual void abort(const blink::WebSpeechRecognitionHandle&, | 58 virtual void abort(const blink::WebSpeechRecognitionHandle&, |
45 blink::WebSpeechRecognizerClient*) OVERRIDE; | 59 blink::WebSpeechRecognizerClient*) OVERRIDE; |
46 | 60 |
47 void OnRecognitionStarted(int request_id); | 61 void OnRecognitionStarted(int request_id); |
48 void OnAudioStarted(int request_id); | 62 void OnAudioStarted(int request_id); |
49 void OnSoundStarted(int request_id); | 63 void OnSoundStarted(int request_id); |
50 void OnSoundEnded(int request_id); | 64 void OnSoundEnded(int request_id); |
51 void OnAudioEnded(int request_id); | 65 void OnAudioEnded(int request_id); |
52 void OnErrorOccurred(int request_id, const SpeechRecognitionError& error); | 66 void OnErrorOccurred(int request_id, const SpeechRecognitionError& error); |
53 void OnRecognitionEnded(int request_id); | 67 void OnRecognitionEnded(int request_id); |
54 void OnResultsRetrieved(int request_id, | 68 void OnResultsRetrieved(int request_id, |
55 const SpeechRecognitionResults& result); | 69 const SpeechRecognitionResults& result); |
| 70 void OnAudioTrackReady(int session_id, const media::AudioParameters& params, |
| 71 base::SharedMemoryHandle handle, |
| 72 base::SyncSocket::TransitDescriptor socket); |
| 73 virtual void OnAudioTrackStopped(); |
56 | 74 |
57 int GetOrCreateIDForHandle(const blink::WebSpeechRecognitionHandle& handle); | 75 int GetOrCreateIDForHandle(const blink::WebSpeechRecognitionHandle& handle); |
58 bool HandleExists(const blink::WebSpeechRecognitionHandle& handle); | 76 bool HandleExists(const blink::WebSpeechRecognitionHandle& handle); |
59 const blink::WebSpeechRecognitionHandle& GetHandleFromID(int handle_id); | 77 const blink::WebSpeechRecognitionHandle& GetHandleFromID(int handle_id); |
60 | 78 |
61 // The WebKit client class that we use to send events back to the JS world. | 79 // The WebKit client class that we use to send events back to the JS world. |
62 blink::WebSpeechRecognizerClient* recognizer_client_; | 80 blink::WebSpeechRecognizerClient* recognizer_client_; |
63 | 81 |
| 82 // Media stream audio track that the speech recognition connects to. |
| 83 // Accessed on the render thread. |
| 84 blink::WebMediaStreamTrack audio_track_; |
| 85 |
 | 86 // Consumes audio from the media stream track and supplies it to the recognizer. |
| 87 scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_; |
| 88 |
64 typedef std::map<int, blink::WebSpeechRecognitionHandle> HandleMap; | 89 typedef std::map<int, blink::WebSpeechRecognitionHandle> HandleMap; |
65 HandleMap handle_map_; | 90 HandleMap handle_map_; |
66 int next_id_; | 91 int next_id_; |
67 | 92 |
68 DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionDispatcher); | 93 DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionDispatcher); |
69 }; | 94 }; |
70 | 95 |
71 } // namespace content | 96 } // namespace content |
72 | 97 |
73 #endif // CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ | 98 #endif // CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ |
OLD | NEW |