Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(361)

Side by Side Diff: content/renderer/speech_recognition_dispatcher.cc

Issue 2517953003: Move enable_webrtc to a buildflag header. (Closed)
Patch Set: Fix — Created 4 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « content/renderer/speech_recognition_dispatcher.h ('k') | content/shell/browser/shell.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/renderer/speech_recognition_dispatcher.h" 5 #include "content/renderer/speech_recognition_dispatcher.h"
6 6
7 #include <stddef.h> 7 #include <stddef.h>
8 #include <stdint.h> 8 #include <stdint.h>
9 #include <utility> 9 #include <utility>
10 10
11 #include "base/strings/utf_string_conversions.h" 11 #include "base/strings/utf_string_conversions.h"
12 #include "content/common/speech_recognition_messages.h" 12 #include "content/common/speech_recognition_messages.h"
13 #include "content/renderer/render_view_impl.h" 13 #include "content/renderer/render_view_impl.h"
14 #include "media/media_features.h"
14 #include "third_party/WebKit/public/platform/WebString.h" 15 #include "third_party/WebKit/public/platform/WebString.h"
15 #include "third_party/WebKit/public/platform/WebVector.h" 16 #include "third_party/WebKit/public/platform/WebVector.h"
16 #include "third_party/WebKit/public/web/WebSpeechGrammar.h" 17 #include "third_party/WebKit/public/web/WebSpeechGrammar.h"
17 #include "third_party/WebKit/public/web/WebSpeechRecognitionParams.h" 18 #include "third_party/WebKit/public/web/WebSpeechRecognitionParams.h"
18 #include "third_party/WebKit/public/web/WebSpeechRecognitionResult.h" 19 #include "third_party/WebKit/public/web/WebSpeechRecognitionResult.h"
19 #include "third_party/WebKit/public/web/WebSpeechRecognizerClient.h" 20 #include "third_party/WebKit/public/web/WebSpeechRecognizerClient.h"
20 21
21 #if defined(ENABLE_WEBRTC) 22 #if BUILDFLAG(ENABLE_WEBRTC)
22 #include "content/renderer/media/speech_recognition_audio_sink.h" 23 #include "content/renderer/media/speech_recognition_audio_sink.h"
23 #endif 24 #endif
24 25
25 using blink::WebVector; 26 using blink::WebVector;
26 using blink::WebString; 27 using blink::WebString;
27 using blink::WebSpeechGrammar; 28 using blink::WebSpeechGrammar;
28 using blink::WebSpeechRecognitionHandle; 29 using blink::WebSpeechRecognitionHandle;
29 using blink::WebSpeechRecognitionResult; 30 using blink::WebSpeechRecognitionResult;
30 using blink::WebSpeechRecognitionParams; 31 using blink::WebSpeechRecognitionParams;
31 using blink::WebSpeechRecognizerClient; 32 using blink::WebSpeechRecognizerClient;
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
70 delete this; 71 delete this;
71 } 72 }
72 73
73 void SpeechRecognitionDispatcher::start( 74 void SpeechRecognitionDispatcher::start(
74 const WebSpeechRecognitionHandle& handle, 75 const WebSpeechRecognitionHandle& handle,
75 const WebSpeechRecognitionParams& params, 76 const WebSpeechRecognitionParams& params,
76 WebSpeechRecognizerClient* recognizer_client) { 77 WebSpeechRecognizerClient* recognizer_client) {
77 DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client); 78 DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client);
78 recognizer_client_ = recognizer_client; 79 recognizer_client_ = recognizer_client;
79 80
80 #if defined(ENABLE_WEBRTC) 81 #if BUILDFLAG(ENABLE_WEBRTC)
81 const blink::WebMediaStreamTrack track = params.audioTrack(); 82 const blink::WebMediaStreamTrack track = params.audioTrack();
82 if (!track.isNull()) { 83 if (!track.isNull()) {
83 // Check if this type of track is allowed by implemented policy. 84 // Check if this type of track is allowed by implemented policy.
84 if (SpeechRecognitionAudioSink::IsSupportedTrack(track)) { 85 if (SpeechRecognitionAudioSink::IsSupportedTrack(track)) {
85 audio_track_.assign(track); 86 audio_track_.assign(track);
86 } else { 87 } else {
87 audio_track_.reset(); 88 audio_track_.reset();
88 // Notify user that the track used is not supported. 89 // Notify user that the track used is not supported.
89 recognizer_client_->didReceiveError( 90 recognizer_client_->didReceiveError(
90 handle, 91 handle,
(...skipping 17 matching lines...) Expand all
108 grammar.weight())); 109 grammar.weight()));
109 } 110 }
110 msg_params.language = 111 msg_params.language =
111 base::UTF16ToUTF8(base::StringPiece16(params.language())); 112 base::UTF16ToUTF8(base::StringPiece16(params.language()));
112 msg_params.max_hypotheses = static_cast<uint32_t>(params.maxAlternatives()); 113 msg_params.max_hypotheses = static_cast<uint32_t>(params.maxAlternatives());
113 msg_params.continuous = params.continuous(); 114 msg_params.continuous = params.continuous();
114 msg_params.interim_results = params.interimResults(); 115 msg_params.interim_results = params.interimResults();
115 msg_params.origin_url = params.origin().toString().utf8(); 116 msg_params.origin_url = params.origin().toString().utf8();
116 msg_params.render_view_id = routing_id(); 117 msg_params.render_view_id = routing_id();
117 msg_params.request_id = GetOrCreateIDForHandle(handle); 118 msg_params.request_id = GetOrCreateIDForHandle(handle);
118 #if defined(ENABLE_WEBRTC) 119 #if BUILDFLAG(ENABLE_WEBRTC)
119 // Fall back to default input when the track is not allowed. 120 // Fall back to default input when the track is not allowed.
120 msg_params.using_audio_track = !audio_track_.isNull(); 121 msg_params.using_audio_track = !audio_track_.isNull();
121 #else 122 #else
122 msg_params.using_audio_track = false; 123 msg_params.using_audio_track = false;
123 #endif 124 #endif
124 // The handle mapping will be removed in |OnRecognitionEnd|. 125 // The handle mapping will be removed in |OnRecognitionEnd|.
125 Send(new SpeechRecognitionHostMsg_StartRequest(msg_params)); 126 Send(new SpeechRecognitionHostMsg_StartRequest(msg_params));
126 } 127 }
127 128
128 void SpeechRecognitionDispatcher::stop( 129 void SpeechRecognitionDispatcher::stop(
(...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after
260 261
261 recognizer_client_->didReceiveResults( 262 recognizer_client_->didReceiveResults(
262 GetHandleFromID(request_id), final, provisional); 263 GetHandleFromID(request_id), final, provisional);
263 } 264 }
264 265
265 void SpeechRecognitionDispatcher::OnAudioReceiverReady( 266 void SpeechRecognitionDispatcher::OnAudioReceiverReady(
266 int request_id, 267 int request_id,
267 const media::AudioParameters& params, 268 const media::AudioParameters& params,
268 const base::SharedMemoryHandle memory, 269 const base::SharedMemoryHandle memory,
269 const base::SyncSocket::TransitDescriptor descriptor) { 270 const base::SyncSocket::TransitDescriptor descriptor) {
270 #if defined(ENABLE_WEBRTC) 271 #if BUILDFLAG(ENABLE_WEBRTC)
271 DCHECK(!speech_audio_sink_.get()); 272 DCHECK(!speech_audio_sink_.get());
272 if (audio_track_.isNull()) { 273 if (audio_track_.isNull()) {
273 ResetAudioSink(); 274 ResetAudioSink();
274 return; 275 return;
275 } 276 }
276 277
277 // The instantiation and type of SyncSocket is up to the client since it 278 // The instantiation and type of SyncSocket is up to the client since it
278 // is dependency injected to the SpeechRecognitionAudioSink. 279 // is dependency injected to the SpeechRecognitionAudioSink.
279 std::unique_ptr<base::SyncSocket> socket(new base::CancelableSyncSocket( 280 std::unique_ptr<base::SyncSocket> socket(new base::CancelableSyncSocket(
280 base::SyncSocket::UnwrapHandle(descriptor))); 281 base::SyncSocket::UnwrapHandle(descriptor)));
(...skipping 26 matching lines...) Expand all
307 for (HandleMap::iterator iter = handle_map_.begin(); 308 for (HandleMap::iterator iter = handle_map_.begin();
308 iter != handle_map_.end(); 309 iter != handle_map_.end();
309 ++iter) { 310 ++iter) {
310 if (iter->second.equals(handle)) 311 if (iter->second.equals(handle))
311 return true; 312 return true;
312 } 313 }
313 return false; 314 return false;
314 } 315 }
315 316
316 void SpeechRecognitionDispatcher::ResetAudioSink() { 317 void SpeechRecognitionDispatcher::ResetAudioSink() {
317 #if defined(ENABLE_WEBRTC) 318 #if BUILDFLAG(ENABLE_WEBRTC)
318 speech_audio_sink_.reset(); 319 speech_audio_sink_.reset();
319 #endif 320 #endif
320 } 321 }
321 322
322 const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID( 323 const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID(
323 int request_id) { 324 int request_id) {
324 HandleMap::iterator iter = handle_map_.find(request_id); 325 HandleMap::iterator iter = handle_map_.find(request_id);
325 CHECK(iter != handle_map_.end()); 326 CHECK(iter != handle_map_.end());
326 return iter->second; 327 return iter->second;
327 } 328 }
328 329
329 } // namespace content 330 } // namespace content
OLDNEW
« no previous file with comments | « content/renderer/speech_recognition_dispatcher.h ('k') | content/shell/browser/shell.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698