OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ | 5 #ifndef CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ |
6 #define CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ | 6 #define CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ |
7 | 7 |
8 #include <map> | 8 #include <map> |
9 | 9 |
10 #include "base/basictypes.h" | 10 #include "base/basictypes.h" |
11 #include "base/memory/scoped_ptr.h" | |
12 #include "base/memory/shared_memory.h" | |
13 #include "base/sync_socket.h" | |
11 #include "content/public/common/speech_recognition_result.h" | 14 #include "content/public/common/speech_recognition_result.h" |
12 #include "content/public/renderer/render_view_observer.h" | 15 #include "content/public/renderer/render_view_observer.h" |
16 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h" | |
13 #include "third_party/WebKit/public/platform/WebVector.h" | 17 #include "third_party/WebKit/public/platform/WebVector.h" |
14 #include "third_party/WebKit/public/web/WebSpeechRecognitionHandle.h" | 18 #include "third_party/WebKit/public/web/WebSpeechRecognitionHandle.h" |
15 #include "third_party/WebKit/public/web/WebSpeechRecognizer.h" | 19 #include "third_party/WebKit/public/web/WebSpeechRecognizer.h" |
16 | 20 |
21 namespace media { | |
22 class AudioParameters; | |
23 } | |
24 | |
17 namespace content { | 25 namespace content { |
18 class RenderViewImpl; | 26 class RenderViewImpl; |
27 #if defined(ENABLE_WEBRTC) | |
28 class SpeechRecognitionAudioSink; | |
29 #endif | |
19 struct SpeechRecognitionError; | 30 struct SpeechRecognitionError; |
20 struct SpeechRecognitionResult; | 31 struct SpeechRecognitionResult; |
21 | 32 |
22 // SpeechRecognitionDispatcher is a delegate for methods used by WebKit for | 33 // SpeechRecognitionDispatcher is a delegate for methods used by WebKit for |
23 // scripted JS speech APIs. It's the complement of | 34 // scripted JS speech APIs. It's the complement of |
24 // SpeechRecognitionDispatcherHost (owned by RenderViewHost). | 35 // SpeechRecognitionDispatcherHost (owned by RenderViewHost). |
25 class SpeechRecognitionDispatcher : public RenderViewObserver, | 36 class SpeechRecognitionDispatcher : public RenderViewObserver, |
26 public blink::WebSpeechRecognizer { | 37 public blink::WebSpeechRecognizer { |
27 public: | 38 public: |
28 explicit SpeechRecognitionDispatcher(RenderViewImpl* render_view); | 39 explicit SpeechRecognitionDispatcher(RenderViewImpl* render_view); |
29 virtual ~SpeechRecognitionDispatcher(); | 40 virtual ~SpeechRecognitionDispatcher(); |
30 | 41 |
31 // Aborts all speech recognitions. | 42 // Aborts all speech recognitions. |
32 void AbortAllRecognitions(); | 43 void AbortAllRecognitions(); |
33 | 44 |
34 private: | 45 private: |
35 // RenderViewObserver implementation. | 46 // RenderViewObserver implementation. |
36 virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE; | 47 virtual bool OnMessageReceived(const IPC::Message& message) override; |
37 | 48 |
38 // blink::WebSpeechRecognizer implementation. | 49 // blink::WebSpeechRecognizer implementation. |
39 virtual void start(const blink::WebSpeechRecognitionHandle&, | 50 virtual void start(const blink::WebSpeechRecognitionHandle&, |
40 const blink::WebSpeechRecognitionParams&, | 51 const blink::WebSpeechRecognitionParams&, |
41 blink::WebSpeechRecognizerClient*) OVERRIDE; | 52 blink::WebSpeechRecognizerClient*) override; |
[review comment] jamesr — 2014/10/08 19:14:11:
not lgtm, you should never use override for cross- [remainder of comment truncated in extraction]
[review comment] burnik — 2014/10/09 07:35:08:
Done.
| |
42 virtual void stop(const blink::WebSpeechRecognitionHandle&, | 53 virtual void stop(const blink::WebSpeechRecognitionHandle&, |
43 blink::WebSpeechRecognizerClient*) OVERRIDE; | 54 blink::WebSpeechRecognizerClient*) override; |
44 virtual void abort(const blink::WebSpeechRecognitionHandle&, | 55 virtual void abort(const blink::WebSpeechRecognitionHandle&, |
45 blink::WebSpeechRecognizerClient*) OVERRIDE; | 56 blink::WebSpeechRecognizerClient*) override; |
46 | 57 |
47 void OnRecognitionStarted(int request_id); | 58 void OnRecognitionStarted(int request_id); |
48 void OnAudioStarted(int request_id); | 59 void OnAudioStarted(int request_id); |
49 void OnSoundStarted(int request_id); | 60 void OnSoundStarted(int request_id); |
50 void OnSoundEnded(int request_id); | 61 void OnSoundEnded(int request_id); |
51 void OnAudioEnded(int request_id); | 62 void OnAudioEnded(int request_id); |
52 void OnErrorOccurred(int request_id, const SpeechRecognitionError& error); | 63 void OnErrorOccurred(int request_id, const SpeechRecognitionError& error); |
53 void OnRecognitionEnded(int request_id); | 64 void OnRecognitionEnded(int request_id); |
54 void OnResultsRetrieved(int request_id, | 65 void OnResultsRetrieved(int request_id, |
55 const SpeechRecognitionResults& result); | 66 const SpeechRecognitionResults& result); |
67 void OnSharedAudioBusReady(int session_id, | |
68 const media::AudioParameters& params, | |
69 const base::SharedMemoryHandle handle, | |
70 const base::SyncSocket::TransitDescriptor socket); | |
71 | |
72 void ResetAudioSink(); | |
56 | 73 |
57 int GetOrCreateIDForHandle(const blink::WebSpeechRecognitionHandle& handle); | 74 int GetOrCreateIDForHandle(const blink::WebSpeechRecognitionHandle& handle); |
58 bool HandleExists(const blink::WebSpeechRecognitionHandle& handle); | 75 bool HandleExists(const blink::WebSpeechRecognitionHandle& handle); |
59 const blink::WebSpeechRecognitionHandle& GetHandleFromID(int handle_id); | 76 const blink::WebSpeechRecognitionHandle& GetHandleFromID(int handle_id); |
60 | 77 |
61 // The WebKit client class that we use to send events back to the JS world. | 78 // The WebKit client class that we use to send events back to the JS world. |
62 blink::WebSpeechRecognizerClient* recognizer_client_; | 79 blink::WebSpeechRecognizerClient* recognizer_client_; |
63 | 80 |
81 #if defined(ENABLE_WEBRTC) | |
82 // Media stream audio track that the speech recognition connects to. | |
83 // Accessed on the render thread. | |
84 blink::WebMediaStreamTrack audio_track_; | |
85 | |
86 // Audio sink used to provide audio from the track. | |
87 scoped_ptr<SpeechRecognitionAudioSink> speech_audio_sink_; | |
88 #endif | |
89 | |
64 typedef std::map<int, blink::WebSpeechRecognitionHandle> HandleMap; | 90 typedef std::map<int, blink::WebSpeechRecognitionHandle> HandleMap; |
65 HandleMap handle_map_; | 91 HandleMap handle_map_; |
66 int next_id_; | 92 int next_id_; |
67 | 93 |
68 DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionDispatcher); | 94 DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionDispatcher); |
69 }; | 95 }; |
70 | 96 |
71 } // namespace content | 97 } // namespace content |
72 | 98 |
73 #endif // CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ | 99 #endif // CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ |
OLD | NEW |