Chromium Code Reviews

Side by Side Diff: content/renderer/speech_recognition_dispatcher.cc

Issue 499233003: Binding media stream audio track to speech recognition [renderer] (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: SyncSocket leak and FIFO fixes. Test 8-192KHz for input. Created 6 years, 3 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/renderer/speech_recognition_dispatcher.h" 5 #include "content/renderer/speech_recognition_dispatcher.h"
6 6
7 #include "base/basictypes.h" 7 #include "base/basictypes.h"
8 #include "base/strings/utf_string_conversions.h" 8 #include "base/strings/utf_string_conversions.h"
9 #include "content/common/speech_recognition_messages.h" 9 #include "content/common/speech_recognition_messages.h"
10 #include "content/renderer/render_view_impl.h" 10 #include "content/renderer/render_view_impl.h"
(...skipping 11 matching lines...)
22 using blink::WebSpeechRecognitionResult; 22 using blink::WebSpeechRecognitionResult;
23 using blink::WebSpeechRecognitionParams; 23 using blink::WebSpeechRecognitionParams;
24 using blink::WebSpeechRecognizerClient; 24 using blink::WebSpeechRecognizerClient;
25 25
26 namespace content { 26 namespace content {
27 27
28 SpeechRecognitionDispatcher::SpeechRecognitionDispatcher( 28 SpeechRecognitionDispatcher::SpeechRecognitionDispatcher(
29 RenderViewImpl* render_view) 29 RenderViewImpl* render_view)
30 : RenderViewObserver(render_view), 30 : RenderViewObserver(render_view),
31 recognizer_client_(NULL), 31 recognizer_client_(NULL),
32 next_id_(1) { 32 next_id_(1) { }
33 }
34 33
35 SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() { 34 SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() {
36 } 35 }
37 36
38 void SpeechRecognitionDispatcher::AbortAllRecognitions() { 37 void SpeechRecognitionDispatcher::AbortAllRecognitions() {
38 audio_source_provider_.reset();
39 Send(new SpeechRecognitionHostMsg_AbortAllRequests( 39 Send(new SpeechRecognitionHostMsg_AbortAllRequests(
40 routing_id())); 40 routing_id()));
41 } 41 }
42 42
43 bool SpeechRecognitionDispatcher::OnMessageReceived( 43 bool SpeechRecognitionDispatcher::OnMessageReceived(
44 const IPC::Message& message) { 44 const IPC::Message& message) {
45 bool handled = true; 45 bool handled = true;
46 IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message) 46 IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message)
47 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted) 47 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted)
48 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioStarted, OnAudioStarted) 48 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioStarted, OnAudioStarted)
49 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundStarted, OnSoundStarted) 49 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundStarted, OnSoundStarted)
50 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundEnded, OnSoundEnded) 50 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundEnded, OnSoundEnded)
51 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded) 51 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded)
52 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred) 52 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred)
53 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded) 53 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded)
54 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved, 54 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved,
55 OnResultsRetrieved) 55 OnResultsRetrieved)
56 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioTrackReady, OnAudioTrackReady)
56 IPC_MESSAGE_UNHANDLED(handled = false) 57 IPC_MESSAGE_UNHANDLED(handled = false)
57 IPC_END_MESSAGE_MAP() 58 IPC_END_MESSAGE_MAP()
58 return handled; 59 return handled;
59 } 60 }
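The new handler registered above implies a browser-to-renderer message that carries the shared-memory buffer and socket for the audio track. Its declaration is not part of this diff; the following is a rough sketch reconstructed from the OnAudioTrackReady signature further down, assuming the same routed-message form used by the other SpeechRecognitionMsg_* messages:

    // Presumed declaration in content/common/speech_recognition_messages.h
    // (not shown in this diff; reconstructed from the handler signature).
    IPC_MESSAGE_ROUTED4(SpeechRecognitionMsg_AudioTrackReady,
                        int /* request_id */,
                        media::AudioParameters /* params */,
                        base::SharedMemoryHandle /* memory */,
                        base::SyncSocket::TransitDescriptor /* descriptor */)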
60 61
61 void SpeechRecognitionDispatcher::start( 62 void SpeechRecognitionDispatcher::start(
62 const WebSpeechRecognitionHandle& handle, 63 const WebSpeechRecognitionHandle& handle,
63 const WebSpeechRecognitionParams& params, 64 const WebSpeechRecognitionParams& params,
64 WebSpeechRecognizerClient* recognizer_client) { 65 WebSpeechRecognizerClient* recognizer_client) {
65 DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client); 66 DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client);
66 recognizer_client_ = recognizer_client; 67 recognizer_client_ = recognizer_client;
67 68
69 const blink::WebMediaStreamTrack track = params.audioTrack();
70 if (!track.isNull()) {
 71 // Check if this type of track is allowed by the implemented policy.
72 if (SpeechRecognitionAudioSourceProvider::IsSupportedTrack(track)) {
73 audio_track_.assign(track);
74 } else {
75 audio_track_.reset();
76 // Notify user that the track used is not supported.
77 recognizer_client_->didReceiveError(
78 handle,
79 WebString("Provided audioTrack is not supported. Ignoring track."),
80 WebSpeechRecognizerClient::NotAllowedError);
no longer working on chromium 2014/09/23 10:09:13 return here, since we are failing the start()
burnik 2014/09/23 12:39:21 Done.
81 }
82 }
83
 84 // Destroy any previous instance so it is not left starving while waiting on chunk ACKs.
no longer working on chromium 2014/09/23 10:09:13 Please add more comment to explain why we stop the
burnik 2014/09/23 12:39:21 Done.
85 audio_source_provider_.reset();
86
68 SpeechRecognitionHostMsg_StartRequest_Params msg_params; 87 SpeechRecognitionHostMsg_StartRequest_Params msg_params;
69 for (size_t i = 0; i < params.grammars().size(); ++i) { 88 for (size_t i = 0; i < params.grammars().size(); ++i) {
70 const WebSpeechGrammar& grammar = params.grammars()[i]; 89 const WebSpeechGrammar& grammar = params.grammars()[i];
71 msg_params.grammars.push_back( 90 msg_params.grammars.push_back(
72 SpeechRecognitionGrammar(grammar.src().spec(), grammar.weight())); 91 SpeechRecognitionGrammar(grammar.src().spec(), grammar.weight()));
73 } 92 }
74 msg_params.language = base::UTF16ToUTF8(params.language()); 93 msg_params.language = base::UTF16ToUTF8(params.language());
75 msg_params.max_hypotheses = static_cast<uint32>(params.maxAlternatives()); 94 msg_params.max_hypotheses = static_cast<uint32>(params.maxAlternatives());
76 msg_params.continuous = params.continuous(); 95 msg_params.continuous = params.continuous();
77 msg_params.interim_results = params.interimResults(); 96 msg_params.interim_results = params.interimResults();
78 msg_params.origin_url = params.origin().toString().utf8(); 97 msg_params.origin_url = params.origin().toString().utf8();
79 msg_params.render_view_id = routing_id(); 98 msg_params.render_view_id = routing_id();
80 msg_params.request_id = GetOrCreateIDForHandle(handle); 99 msg_params.request_id = GetOrCreateIDForHandle(handle);
100 // Fall back to the default audio input when the track is not allowed.
101 msg_params.using_audio_track = !audio_track_.isNull();
81 // The handle mapping will be removed in |OnRecognitionEnd|. 102 // The handle mapping will be removed in |OnRecognitionEnd|.
82 Send(new SpeechRecognitionHostMsg_StartRequest(msg_params)); 103 Send(new SpeechRecognitionHostMsg_StartRequest(msg_params));
83 } 104 }
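The two comment threads above ask for an early return when the provided track is rejected and for a fuller explanation of why the previous provider is destroyed. A minimal sketch of how that portion of start() might read once both remarks are addressed; the exact error text and comment wording are assumptions, since the follow-up patch set is not shown here:

    const blink::WebMediaStreamTrack track = params.audioTrack();
    if (!track.isNull()) {
      // Check if this type of track is allowed by the implemented policy.
      if (SpeechRecognitionAudioSourceProvider::IsSupportedTrack(track)) {
        audio_track_.assign(track);
      } else {
        audio_track_.reset();
        // The track cannot be used: report the failure and abort the start()
        // call rather than silently falling back to the default microphone.
        recognizer_client_->didReceiveError(
            handle,
            WebString("Provided audioTrack is not supported."),
            WebSpeechRecognizerClient::NotAllowedError);
        return;
      }
    }

    // Destroy any previous provider first: once its session is over nothing
    // acknowledges its audio chunks anymore, so a leftover instance would
    // otherwise sit blocked waiting on chunk ACKs.
    audio_source_provider_.reset();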
84 105
85 void SpeechRecognitionDispatcher::stop( 106 void SpeechRecognitionDispatcher::stop(
86 const WebSpeechRecognitionHandle& handle, 107 const WebSpeechRecognitionHandle& handle,
87 WebSpeechRecognizerClient* recognizer_client) { 108 WebSpeechRecognizerClient* recognizer_client) {
109 audio_source_provider_.reset();
88 // Ignore a |stop| issued without a matching |start|. 110 // Ignore a |stop| issued without a matching |start|.
89 if (recognizer_client_ != recognizer_client || !HandleExists(handle)) 111 if (recognizer_client_ != recognizer_client || !HandleExists(handle))
90 return; 112 return;
91 Send(new SpeechRecognitionHostMsg_StopCaptureRequest( 113 Send(new SpeechRecognitionHostMsg_StopCaptureRequest(
92 routing_id(), GetOrCreateIDForHandle(handle))); 114 routing_id(), GetOrCreateIDForHandle(handle)));
93 } 115 }
94 116
95 void SpeechRecognitionDispatcher::abort( 117 void SpeechRecognitionDispatcher::abort(
96 const WebSpeechRecognitionHandle& handle, 118 const WebSpeechRecognitionHandle& handle,
97 WebSpeechRecognizerClient* recognizer_client) { 119 WebSpeechRecognizerClient* recognizer_client) {
120 audio_source_provider_.reset();
98 // Ignore an |abort| issued without a matching |start|. 121 // Ignore an |abort| issued without a matching |start|.
99 if (recognizer_client_ != recognizer_client || !HandleExists(handle)) 122 if (recognizer_client_ != recognizer_client || !HandleExists(handle))
100 return; 123 return;
101 Send(new SpeechRecognitionHostMsg_AbortRequest( 124 Send(new SpeechRecognitionHostMsg_AbortRequest(
102 routing_id(), GetOrCreateIDForHandle(handle))); 125 routing_id(), GetOrCreateIDForHandle(handle)));
103 } 126 }
104 127
105 void SpeechRecognitionDispatcher::OnRecognitionStarted(int request_id) { 128 void SpeechRecognitionDispatcher::OnRecognitionStarted(int request_id) {
106 recognizer_client_->didStart(GetHandleFromID(request_id)); 129 recognizer_client_->didStart(GetHandleFromID(request_id));
107 } 130 }
(...skipping 36 matching lines...)
144 case SPEECH_RECOGNITION_ERROR_BAD_GRAMMAR: 167 case SPEECH_RECOGNITION_ERROR_BAD_GRAMMAR:
145 return WebSpeechRecognizerClient::BadGrammarError; 168 return WebSpeechRecognizerClient::BadGrammarError;
146 } 169 }
147 NOTREACHED(); 170 NOTREACHED();
148 return WebSpeechRecognizerClient::OtherError; 171 return WebSpeechRecognizerClient::OtherError;
149 } 172 }
150 173
151 void SpeechRecognitionDispatcher::OnErrorOccurred( 174 void SpeechRecognitionDispatcher::OnErrorOccurred(
152 int request_id, const SpeechRecognitionError& error) { 175 int request_id, const SpeechRecognitionError& error) {
153 if (error.code == SPEECH_RECOGNITION_ERROR_NO_MATCH) { 176 if (error.code == SPEECH_RECOGNITION_ERROR_NO_MATCH) {
154 recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id), 177 recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id),
no longer working on chromium 2014/09/23 10:09:13 what will happen when getting a didReceiveNoMatch
burnik 2014/09/23 12:39:21 This is an API protocol thing as far as I know, no
155 WebSpeechRecognitionResult()); 178 WebSpeechRecognitionResult());
156 } else { 179 } else {
180 audio_source_provider_.reset();
157 recognizer_client_->didReceiveError( 181 recognizer_client_->didReceiveError(
158 GetHandleFromID(request_id), 182 GetHandleFromID(request_id),
159 WebString(), // TODO(primiano): message? 183 WebString(), // TODO(primiano): message?
160 WebKitErrorCode(error.code)); 184 WebKitErrorCode(error.code));
161 } 185 }
162 } 186 }
163 187
164 void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) { 188 void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) {
165 // TODO(tommi): It is possible that the handle isn't found in the array if 189 // TODO(tommi): It is possible that the handle isn't found in the array if
166 // the user just refreshed the page. It seems that we then get a notification 190 // the user just refreshed the page. It seems that we then get a notification
167 // for the previously loaded instance of the page. 191 // for the previously loaded instance of the page.
168 HandleMap::iterator iter = handle_map_.find(request_id); 192 HandleMap::iterator iter = handle_map_.find(request_id);
169 if (iter == handle_map_.end()) { 193 if (iter == handle_map_.end()) {
170 DLOG(ERROR) << "OnRecognitionEnded called for a handle that doesn't exist"; 194 DLOG(ERROR) << "OnRecognitionEnded called for a handle that doesn't exist";
171 } else { 195 } else {
172 WebSpeechRecognitionHandle handle = iter->second; 196 WebSpeechRecognitionHandle handle = iter->second;
173 // Note: we need to erase the handle from the map *before* calling didEnd. 197 // Note: we need to erase the handle from the map *before* calling didEnd.
174 // didEnd may call back synchronously to start a new recognition session, 198 // didEnd may call back synchronously to start a new recognition session,
175 // and we don't want to delete the handle from the map after that happens. 199 // and we don't want to delete the handle from the map after that happens.
176 handle_map_.erase(request_id); 200 handle_map_.erase(request_id);
201 audio_source_provider_.reset();
177 recognizer_client_->didEnd(handle); 202 recognizer_client_->didEnd(handle);
178 } 203 }
179 } 204 }
180 205
181 void SpeechRecognitionDispatcher::OnResultsRetrieved( 206 void SpeechRecognitionDispatcher::OnResultsRetrieved(
182 int request_id, const SpeechRecognitionResults& results) { 207 int request_id, const SpeechRecognitionResults& results) {
183 size_t provisional_count = 0; 208 size_t provisional_count = 0;
184 SpeechRecognitionResults::const_iterator it = results.begin(); 209 SpeechRecognitionResults::const_iterator it = results.begin();
185 for (; it != results.end(); ++it) { 210 for (; it != results.end(); ++it) {
186 if (it->is_provisional) 211 if (it->is_provisional)
(...skipping 17 matching lines...)
204 transcripts[i] = result.hypotheses[i].utterance; 229 transcripts[i] = result.hypotheses[i].utterance;
205 confidences[i] = static_cast<float>(result.hypotheses[i].confidence); 230 confidences[i] = static_cast<float>(result.hypotheses[i].confidence);
206 } 231 }
207 webkit_result->assign(transcripts, confidences, !result.is_provisional); 232 webkit_result->assign(transcripts, confidences, !result.is_provisional);
208 } 233 }
209 234
210 recognizer_client_->didReceiveResults( 235 recognizer_client_->didReceiveResults(
211 GetHandleFromID(request_id), final, provisional); 236 GetHandleFromID(request_id), final, provisional);
212 } 237 }
213 238
239 void SpeechRecognitionDispatcher::OnAudioTrackReady(
240 int request_id,
241 const media::AudioParameters& params,
242 base::SharedMemoryHandle memory,
243 base::SyncSocket::TransitDescriptor descriptor) {
244 DCHECK(!audio_source_provider_.get());
245 if (audio_track_.isNull()) {
246 audio_source_provider_.reset();
247 return;
248 }
249
250 // Create socket here and pass ownership to the |audio_source_provider_|.
251 scoped_ptr<base::SyncSocket> socket(
252 new base::SyncSocket(base::SyncSocket::UnwrapHandle(descriptor)));
253
254 audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider(
255 audio_track_, params, memory, socket.release(),
no longer working on chromium 2014/09/23 10:09:13 you don't need to create a socket at all, just pas
burnik 2014/09/23 12:39:21 That would be true if the unit test did not have a
256 base::Bind(&SpeechRecognitionDispatcher::OnAudioTrackStopped,
257 base::Unretained(this))));
258 }
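The thread above debates whether the dispatcher needs to construct the base::SyncSocket at all, or could hand the transit descriptor straight to the provider. A sketch of that alternative, assuming a hypothetical SpeechRecognitionAudioSourceProvider constructor overload that accepts the descriptor directly (no such overload appears in this diff; the author's reply suggests the unit test setup is the reason for keeping socket construction in the dispatcher):

    // Hypothetical alternative discussed in the review: let the provider
    // unwrap the descriptor itself instead of receiving a released
    // base::SyncSocket* from the dispatcher.
    audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider(
        audio_track_, params, memory, descriptor,
        base::Bind(&SpeechRecognitionDispatcher::OnAudioTrackStopped,
                   base::Unretained(this))));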
214 259
215 int SpeechRecognitionDispatcher::GetOrCreateIDForHandle( 260 int SpeechRecognitionDispatcher::GetOrCreateIDForHandle(
216 const WebSpeechRecognitionHandle& handle) { 261 const WebSpeechRecognitionHandle& handle) {
217 // Search first for an existing mapping. 262 // Search first for an existing mapping.
218 for (HandleMap::iterator iter = handle_map_.begin(); 263 for (HandleMap::iterator iter = handle_map_.begin();
219 iter != handle_map_.end(); 264 iter != handle_map_.end();
220 ++iter) { 265 ++iter) {
221 if (iter->second.equals(handle)) 266 if (iter->second.equals(handle))
222 return iter->first; 267 return iter->first;
223 } 268 }
224 // If no existing mapping found, create a new one. 269 // If no existing mapping found, create a new one.
225 const int new_id = next_id_; 270 const int new_id = next_id_;
226 handle_map_[new_id] = handle; 271 handle_map_[new_id] = handle;
227 ++next_id_; 272 ++next_id_;
228 return new_id; 273 return new_id;
229 } 274 }
230 275
231 bool SpeechRecognitionDispatcher::HandleExists( 276 bool SpeechRecognitionDispatcher::HandleExists(
232 const WebSpeechRecognitionHandle& handle) { 277 const WebSpeechRecognitionHandle& handle) {
233 for (HandleMap::iterator iter = handle_map_.begin(); 278 for (HandleMap::iterator iter = handle_map_.begin();
234 iter != handle_map_.end(); 279 iter != handle_map_.end();
235 ++iter) { 280 ++iter) {
236 if (iter->second.equals(handle)) 281 if (iter->second.equals(handle))
237 return true; 282 return true;
238 } 283 }
239 return false; 284 return false;
240 } 285 }
241 286
287 void SpeechRecognitionDispatcher::OnAudioTrackStopped() {
288 audio_track_.reset();
289 }
290
242 const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID( 291 const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID(
243 int request_id) { 292 int request_id) {
244 HandleMap::iterator iter = handle_map_.find(request_id); 293 HandleMap::iterator iter = handle_map_.find(request_id);
245 DCHECK(iter != handle_map_.end()); 294 DCHECK(iter != handle_map_.end());
246 return iter->second; 295 return iter->second;
247 } 296 }
248 297
249 } // namespace content 298 } // namespace content
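The members and handlers this file now relies on (audio_track_, audio_source_provider_, OnAudioTrackReady, OnAudioTrackStopped) are declared in speech_recognition_dispatcher.h, which is not included in this diff. A rough reconstruction of those additions from their usage above:

    // Presumed additions to content/renderer/speech_recognition_dispatcher.h
    // (reconstructed from usage in this file; not part of the diff shown).
    void OnAudioTrackReady(int request_id,
                           const media::AudioParameters& params,
                           base::SharedMemoryHandle memory,
                           base::SyncSocket::TransitDescriptor descriptor);
    void OnAudioTrackStopped();

    // Track provided through WebSpeechRecognitionParams::audioTrack(), if any.
    blink::WebMediaStreamTrack audio_track_;
    // Feeds audio from |audio_track_| to the browser process; reset whenever
    // a session is stopped, aborted, ends, or fails with an error.
    scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_;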