Chromium Code Reviews

Issue 499233003: Binding media stream audio track to speech recognition [renderer] (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #include "content/renderer/speech_recognition_dispatcher.h" | 5 #include "content/renderer/speech_recognition_dispatcher.h" | 
| 6 | 6 | 
| 7 #include "base/basictypes.h" | 7 #include "base/basictypes.h" | 
| 8 #include "base/strings/utf_string_conversions.h" | 8 #include "base/strings/utf_string_conversions.h" | 
| 9 #include "content/common/speech_recognition_messages.h" | 9 #include "content/common/speech_recognition_messages.h" | 
| 10 #include "content/renderer/render_view_impl.h" | 10 #include "content/renderer/render_view_impl.h" | 
| 11 #include "content/renderer/speech_recognition_audio_source_provider.h" | |
| 11 #include "third_party/WebKit/public/platform/WebString.h" | 12 #include "third_party/WebKit/public/platform/WebString.h" | 
| 12 #include "third_party/WebKit/public/platform/WebVector.h" | 13 #include "third_party/WebKit/public/platform/WebVector.h" | 
| 13 #include "third_party/WebKit/public/web/WebSpeechGrammar.h" | 14 #include "third_party/WebKit/public/web/WebSpeechGrammar.h" | 
| 14 #include "third_party/WebKit/public/web/WebSpeechRecognitionParams.h" | 15 #include "third_party/WebKit/public/web/WebSpeechRecognitionParams.h" | 
| 15 #include "third_party/WebKit/public/web/WebSpeechRecognitionResult.h" | 16 #include "third_party/WebKit/public/web/WebSpeechRecognitionResult.h" | 
| 16 #include "third_party/WebKit/public/web/WebSpeechRecognizerClient.h" | 17 #include "third_party/WebKit/public/web/WebSpeechRecognizerClient.h" | 
| 17 | 18 | 
| 18 using blink::WebVector; | 19 using blink::WebVector; | 
| 19 using blink::WebString; | 20 using blink::WebString; | 
| 20 using blink::WebSpeechGrammar; | 21 using blink::WebSpeechGrammar; | 
| 21 using blink::WebSpeechRecognitionHandle; | 22 using blink::WebSpeechRecognitionHandle; | 
| 22 using blink::WebSpeechRecognitionResult; | 23 using blink::WebSpeechRecognitionResult; | 
| 23 using blink::WebSpeechRecognitionParams; | 24 using blink::WebSpeechRecognitionParams; | 
| 24 using blink::WebSpeechRecognizerClient; | 25 using blink::WebSpeechRecognizerClient; | 
| 25 | 26 | 
| 26 namespace content { | 27 namespace content { | 
| 27 | 28 | 
| 28 SpeechRecognitionDispatcher::SpeechRecognitionDispatcher( | 29 SpeechRecognitionDispatcher::SpeechRecognitionDispatcher( | 
| 29 RenderViewImpl* render_view) | 30 RenderViewImpl* render_view) | 
| 30 : RenderViewObserver(render_view), | 31 : RenderViewObserver(render_view), | 
| 31 recognizer_client_(NULL), | 32 recognizer_client_(NULL), | 
| 33 audio_track_set_(false), | |
| 34 is_allowed_audio_track_(false), | |
| 35 render_loop_(base::MessageLoopProxy::current()), | |
| 32 next_id_(1) { | 36 next_id_(1) { | 
| 33 } | 37 } | 
| 34 | 38 | 
| 35 SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() { | 39 SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() { | 
| 36 } | 40 } | 
| 37 | 41 | 
| 38 void SpeechRecognitionDispatcher::AbortAllRecognitions() { | 42 void SpeechRecognitionDispatcher::AbortAllRecognitions() { | 
| 43 audio_source_provider_.reset(); | |
| 39 Send(new SpeechRecognitionHostMsg_AbortAllRequests( | 44 Send(new SpeechRecognitionHostMsg_AbortAllRequests( | 
| 40 routing_id())); | 45 routing_id())); | 
| 41 } | 46 } | 
| 42 | 47 | 
| 43 bool SpeechRecognitionDispatcher::OnMessageReceived( | 48 bool SpeechRecognitionDispatcher::OnMessageReceived( | 
| 44 const IPC::Message& message) { | 49 const IPC::Message& message) { | 
| 45 bool handled = true; | 50 bool handled = true; | 
| 46 IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message) | 51 IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message) | 
| 47 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted) | 52 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted) | 
| 48 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioStarted, OnAudioStarted) | 53 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioStarted, OnAudioStarted) | 
| 49 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundStarted, OnSoundStarted) | 54 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundStarted, OnSoundStarted) | 
| 50 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundEnded, OnSoundEnded) | 55 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundEnded, OnSoundEnded) | 
| 51 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded) | 56 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded) | 
| 52 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred) | 57 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred) | 
| 53 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded) | 58 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded) | 
| 54 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved, | 59 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved, | 
| 55 OnResultsRetrieved) | 60 OnResultsRetrieved) | 
| 61 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioTrackReady, | |
| 62 OnAudioTrackReady) | |
| 56 IPC_MESSAGE_UNHANDLED(handled = false) | 63 IPC_MESSAGE_UNHANDLED(handled = false) | 
| 57 IPC_END_MESSAGE_MAP() | 64 IPC_END_MESSAGE_MAP() | 
| 58 return handled; | 65 return handled; | 
| 59 } | 66 } | 
| 60 | 67 | 
| 68 void SpeechRecognitionDispatcher::attach( | |
| 69 const blink::WebSpeechRecognitionHandle& handle, | |
| 70 const blink::WebMediaStreamTrack& audio_track, | |
| 71 blink::WebSpeechRecognizerClient* recognizer_client) { | |
| 72 | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
remove this empty line
 
burnik
2014/08/29 13:26:17
Done.
 | |
| 73 // Check if track is from an allowed source (microphone only for now) | |
| 74 is_allowed_audio_track_ = | |
| 75 SpeechRecognitionAudioSourceProvider::IsAllowedAudioTrack(audio_track); | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
would it make sense to have the IsAllowedAudioTrack…
 
burnik
2014/08/29 13:26:18
My intention is to move away as much logic as possible…
 | |
| 76 audio_track_ = audio_track; | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
first DCHECK that audio_track_ isn't valid?
 
burnik
2014/08/29 13:26:17
Method needs refactoring.
 | |
| 77 audio_track_set_ = true; | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
Should we DCHECK(!audio_track_set_); at the top of…
 | |
| 78 } | |
| 79 | |
| 80 void SpeechRecognitionDispatcher::detach( | |
| 81 const blink::WebSpeechRecognitionHandle& handle, | |
| 82 blink::WebSpeechRecognizerClient* recognizer_client) { | |
| 83 audio_track_set_ = false; | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
reset/clear audio_track_ as well and set is_allowed_audio_track_…
 
burnik
2014/08/29 13:26:17
Method needs refactoring.
 | |
| 84 } | |
| 85 | |
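Editor's note: a sketch of how the two suggestions above might be folded in once the method is refactored (hypothetical; the thread defers this to a later patch). It adds the suggested DCHECK against a double attach and clears all of the track state in detach().

```cpp
// Hypothetical sketch only -- not part of this patch set.
void SpeechRecognitionDispatcher::attach(
    const blink::WebSpeechRecognitionHandle& handle,
    const blink::WebMediaStreamTrack& audio_track,
    blink::WebSpeechRecognizerClient* recognizer_client) {
  DCHECK(!audio_track_set_);  // Suggested: catch a double attach early.
  is_allowed_audio_track_ =
      SpeechRecognitionAudioSourceProvider::IsAllowedAudioTrack(audio_track);
  audio_track_ = audio_track;
  audio_track_set_ = true;
}

void SpeechRecognitionDispatcher::detach(
    const blink::WebSpeechRecognitionHandle& handle,
    blink::WebSpeechRecognizerClient* recognizer_client) {
  audio_track_.reset();             // Suggested: clear the track itself...
  is_allowed_audio_track_ = false;  // ...and the permission flag with it.
  audio_track_set_ = false;
}
```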
| 61 void SpeechRecognitionDispatcher::start( | 86 void SpeechRecognitionDispatcher::start( | 
| 62 const WebSpeechRecognitionHandle& handle, | 87 const WebSpeechRecognitionHandle& handle, | 
| 63 const WebSpeechRecognitionParams& params, | 88 const WebSpeechRecognitionParams& params, | 
| 64 WebSpeechRecognizerClient* recognizer_client) { | 89 WebSpeechRecognizerClient* recognizer_client) { | 
| 65 DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client); | 90 DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client); | 
| 66 recognizer_client_ = recognizer_client; | 91 recognizer_client_ = recognizer_client; | 
| 67 | 92 | 
| 93 // destroy any previous instance not to starve it waiting on chunk ACKs | |
| 
no longer working on chromium
2014/08/29 12:23:07
Destroy
 
burnik
2014/08/29 13:26:17
Done.
 | |
| 94 audio_source_provider_.reset(); | |
| 95 | |
| 96 if (audio_track_set_ && !is_allowed_audio_track_) { | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
do you really need is_allowed_audio_track_? If th…
 
burnik
2014/08/29 13:26:18
The JS |webkitSpeechRecognition.audioTrack| is not…
 | |
| 97 // notify user that the track used is not supported | |
| 98 recognizer_client_->didReceiveError( | |
| 99 handle, | |
| 100 WebString("Provided audioTrack is not supported. Ignoring track."), | |
| 101 WebSpeechRecognizerClient::NotAllowedError); | |
| 
no longer working on chromium
2014/08/29 12:23:07
probably you should fail the start call in such case…
 
burnik
2014/08/29 13:26:17
Good for discussion.
 | |
| 102 } | |
| 103 | |
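Editor's note: for the "fail the start call" alternative raised above, a minimal sketch of the early-out variant (hypothetical; the patch as written warns and then falls back to the default audio input instead):

```cpp
// Hypothetical variant: refuse to start recognition on a disallowed track.
if (audio_track_set_ && !is_allowed_audio_track_) {
  recognizer_client_->didReceiveError(
      handle,
      WebString("Provided audioTrack is not supported."),
      WebSpeechRecognizerClient::NotAllowedError);
  return;  // Do not send the StartRequest at all.
}
```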
| 68 SpeechRecognitionHostMsg_StartRequest_Params msg_params; | 104 SpeechRecognitionHostMsg_StartRequest_Params msg_params; | 
| 69 for (size_t i = 0; i < params.grammars().size(); ++i) { | 105 for (size_t i = 0; i < params.grammars().size(); ++i) { | 
| 70 const WebSpeechGrammar& grammar = params.grammars()[i]; | 106 const WebSpeechGrammar& grammar = params.grammars()[i]; | 
| 71 msg_params.grammars.push_back( | 107 msg_params.grammars.push_back( | 
| 72 SpeechRecognitionGrammar(grammar.src().spec(), grammar.weight())); | 108 SpeechRecognitionGrammar(grammar.src().spec(), grammar.weight())); | 
| 73 } | 109 } | 
| 74 msg_params.language = base::UTF16ToUTF8(params.language()); | 110 msg_params.language = base::UTF16ToUTF8(params.language()); | 
| 75 msg_params.max_hypotheses = static_cast<uint32>(params.maxAlternatives()); | 111 msg_params.max_hypotheses = static_cast<uint32>(params.maxAlternatives()); | 
| 76 msg_params.continuous = params.continuous(); | 112 msg_params.continuous = params.continuous(); | 
| 77 msg_params.interim_results = params.interimResults(); | 113 msg_params.interim_results = params.interimResults(); | 
| 78 msg_params.origin_url = params.origin().toString().utf8(); | 114 msg_params.origin_url = params.origin().toString().utf8(); | 
| 79 msg_params.render_view_id = routing_id(); | 115 msg_params.render_view_id = routing_id(); | 
| 80 msg_params.request_id = GetOrCreateIDForHandle(handle); | 116 msg_params.request_id = GetOrCreateIDForHandle(handle); | 
| 117 // fall back to default input when the track is not allowed | |
| 118 msg_params.using_audio_track = (audio_track_set_ && is_allowed_audio_track_); | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
looks like you always test these variables together…
 
burnik
2014/08/29 13:26:18
Same as comment above. The JS API behaviour is not…
 | |
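Editor's note: one reading of the comment above is that, since the two flags are only ever tested as a pair, a single helper (hypothetical name) would keep call sites from drifting apart:

```cpp
// Hypothetical helper, not in the patch.
bool SpeechRecognitionDispatcher::HasAllowedAudioTrack() const {
  return audio_track_set_ && is_allowed_audio_track_;
}
```

The line above would then read `msg_params.using_audio_track = HasAllowedAudioTrack();`.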
| 119 msg_params.peer_process_handle = base::GetCurrentProcessHandle(); | |
| 81 // The handle mapping will be removed in |OnRecognitionEnd|. | 120 // The handle mapping will be removed in |OnRecognitionEnd|. | 
| 82 Send(new SpeechRecognitionHostMsg_StartRequest(msg_params)); | 121 Send(new SpeechRecognitionHostMsg_StartRequest(msg_params)); | 
| 83 } | 122 } | 
| 84 | 123 | 
| 85 void SpeechRecognitionDispatcher::stop( | 124 void SpeechRecognitionDispatcher::stop( | 
| 86 const WebSpeechRecognitionHandle& handle, | 125 const WebSpeechRecognitionHandle& handle, | 
| 87 WebSpeechRecognizerClient* recognizer_client) { | 126 WebSpeechRecognizerClient* recognizer_client) { | 
| 127 audio_source_provider_.reset(); | |
| 88 // Ignore a |stop| issued without a matching |start|. | 128 // Ignore a |stop| issued without a matching |start|. | 
| 89 if (recognizer_client_ != recognizer_client || !HandleExists(handle)) | 129 if (recognizer_client_ != recognizer_client || !HandleExists(handle)) | 
| 90 return; | 130 return; | 
| 91 Send(new SpeechRecognitionHostMsg_StopCaptureRequest( | 131 Send(new SpeechRecognitionHostMsg_StopCaptureRequest( | 
| 92 routing_id(), GetOrCreateIDForHandle(handle))); | 132 routing_id(), GetOrCreateIDForHandle(handle))); | 
| 93 } | 133 } | 
| 94 | 134 | 
| 95 void SpeechRecognitionDispatcher::abort( | 135 void SpeechRecognitionDispatcher::abort( | 
| 96 const WebSpeechRecognitionHandle& handle, | 136 const WebSpeechRecognitionHandle& handle, | 
| 97 WebSpeechRecognizerClient* recognizer_client) { | 137 WebSpeechRecognizerClient* recognizer_client) { | 
| 138 audio_source_provider_.reset(); | |
| 98 // Ignore an |abort| issued without a matching |start|. | 139 // Ignore an |abort| issued without a matching |start|. | 
| 99 if (recognizer_client_ != recognizer_client || !HandleExists(handle)) | 140 if (recognizer_client_ != recognizer_client || !HandleExists(handle)) | 
| 100 return; | 141 return; | 
| 101 Send(new SpeechRecognitionHostMsg_AbortRequest( | 142 Send(new SpeechRecognitionHostMsg_AbortRequest( | 
| 102 routing_id(), GetOrCreateIDForHandle(handle))); | 143 routing_id(), GetOrCreateIDForHandle(handle))); | 
| 103 } | 144 } | 
| 104 | 145 | 
| 105 void SpeechRecognitionDispatcher::OnRecognitionStarted(int request_id) { | 146 void SpeechRecognitionDispatcher::OnRecognitionStarted(int request_id) { | 
| 106 recognizer_client_->didStart(GetHandleFromID(request_id)); | 147 recognizer_client_->didStart(GetHandleFromID(request_id)); | 
| 107 } | 148 } | 
| (...skipping 39 matching lines...) | |
| 147 NOTREACHED(); | 188 NOTREACHED(); | 
| 148 return WebSpeechRecognizerClient::OtherError; | 189 return WebSpeechRecognizerClient::OtherError; | 
| 149 } | 190 } | 
| 150 | 191 | 
| 151 void SpeechRecognitionDispatcher::OnErrorOccurred( | 192 void SpeechRecognitionDispatcher::OnErrorOccurred( | 
| 152 int request_id, const SpeechRecognitionError& error) { | 193 int request_id, const SpeechRecognitionError& error) { | 
| 153 if (error.code == SPEECH_RECOGNITION_ERROR_NO_MATCH) { | 194 if (error.code == SPEECH_RECOGNITION_ERROR_NO_MATCH) { | 
| 154 recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id), | 195 recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id), | 
| 155 WebSpeechRecognitionResult()); | 196 WebSpeechRecognitionResult()); | 
| 156 } else { | 197 } else { | 
| 198 audio_source_provider_.reset(); | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
I'm assuming that the browser side will be aware of…
 
burnik
2014/08/29 13:26:18
This message is received from the browser. Sockets…
 | |
| 157 recognizer_client_->didReceiveError( | 199 recognizer_client_->didReceiveError( | 
| 158 GetHandleFromID(request_id), | 200 GetHandleFromID(request_id), | 
| 159 WebString(), // TODO(primiano): message? | 201 WebString(), // TODO(primiano): message? | 
| 160 WebKitErrorCode(error.code)); | 202 WebKitErrorCode(error.code)); | 
| 161 } | 203 } | 
| 162 } | 204 } | 
| 163 | 205 | 
| 164 void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) { | 206 void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) { | 
| 165 // TODO(tommi): It is possible that the handle isn't found in the array if | 207 // TODO(tommi): It is possible that the handle isn't found in the array if | 
| 166 // the user just refreshed the page. It seems that we then get a notification | 208 // the user just refreshed the page. It seems that we then get a notification | 
| 167 // for the previously loaded instance of the page. | 209 // for the previously loaded instance of the page. | 
| 168 HandleMap::iterator iter = handle_map_.find(request_id); | 210 HandleMap::iterator iter = handle_map_.find(request_id); | 
| 169 if (iter == handle_map_.end()) { | 211 if (iter == handle_map_.end()) { | 
| 170 DLOG(ERROR) << "OnRecognitionEnded called for a handle that doesn't exist"; | 212 DLOG(ERROR) << "OnRecognitionEnded called for a handle that doesn't exist"; | 
| 171 } else { | 213 } else { | 
| 172 WebSpeechRecognitionHandle handle = iter->second; | 214 WebSpeechRecognitionHandle handle = iter->second; | 
| 173 // Note: we need to erase the handle from the map *before* calling didEnd. | 215 // Note: we need to erase the handle from the map *before* calling didEnd. | 
| 174 // didEnd may call back synchronously to start a new recognition session, | 216 // didEnd may call back synchronously to start a new recognition session, | 
| 175 // and we don't want to delete the handle from the map after that happens. | 217 // and we don't want to delete the handle from the map after that happens. | 
| 176 handle_map_.erase(request_id); | 218 handle_map_.erase(request_id); | 
| 219 audio_source_provider_.reset(); | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
same question here since this feels functionally c…
 
burnik
2014/08/29 13:26:17
This effectively kills off the sockets so the audio…
 | |
| 177 recognizer_client_->didEnd(handle); | 220 recognizer_client_->didEnd(handle); | 
| 178 } | 221 } | 
| 179 } | 222 } | 
| 180 | 223 | 
| 181 void SpeechRecognitionDispatcher::OnResultsRetrieved( | 224 void SpeechRecognitionDispatcher::OnResultsRetrieved( | 
| 182 int request_id, const SpeechRecognitionResults& results) { | 225 int request_id, const SpeechRecognitionResults& results) { | 
| 183 size_t provisional_count = 0; | 226 size_t provisional_count = 0; | 
| 184 SpeechRecognitionResults::const_iterator it = results.begin(); | 227 SpeechRecognitionResults::const_iterator it = results.begin(); | 
| 185 for (; it != results.end(); ++it) { | 228 for (; it != results.end(); ++it) { | 
| 186 if (it->is_provisional) | 229 if (it->is_provisional) | 
| (...skipping 17 matching lines...) | |
| 204 transcripts[i] = result.hypotheses[i].utterance; | 247 transcripts[i] = result.hypotheses[i].utterance; | 
| 205 confidences[i] = static_cast<float>(result.hypotheses[i].confidence); | 248 confidences[i] = static_cast<float>(result.hypotheses[i].confidence); | 
| 206 } | 249 } | 
| 207 webkit_result->assign(transcripts, confidences, !result.is_provisional); | 250 webkit_result->assign(transcripts, confidences, !result.is_provisional); | 
| 208 } | 251 } | 
| 209 | 252 | 
| 210 recognizer_client_->didReceiveResults( | 253 recognizer_client_->didReceiveResults( | 
| 211 GetHandleFromID(request_id), final, provisional); | 254 GetHandleFromID(request_id), final, provisional); | 
| 212 } | 255 } | 
| 213 | 256 | 
| 257 void SpeechRecognitionDispatcher::OnAudioTrackReady( | |
| 258 int request_id, | |
| 259 const media::AudioParameters& params, | |
| 260 base::SharedMemoryHandle memory, | |
| 261 base::NativeSyncSocket::Descriptor socket, | |
| 262 uint32 length) { | |
| 263 // TODO(burnik): Log and DCHECK(!audio_source_provider_). | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
can you do this now?
 
burnik
2014/08/29 13:26:18
It's still a bit fuzzy because we can only have one…
 | |
| 264 if (audio_track_.isNull()) { | |
| 265 audio_source_provider_.reset(); | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
should this be done in detach() also?
 
burnik
2014/08/29 13:26:17
I would have to dig in deeper to check if this would…
 | |
| 266 return; | |
| 267 } | |
| 268 audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider( | |
| 269 audio_track_, params, memory, socket, length)); | |
| 
tommi (sloooow) - chröme
2014/08/29 11:25:31
indent
 
burnik
2014/08/29 13:26:17
Done.
 | |
| 270 } | |
| 214 | 271 | 
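Editor's note: a sketch of what the TODO(burnik) above might expand to, assuming at most one provider should be live per dispatcher at a time (the thread notes this is still fuzzy):

```cpp
// Hypothetical expansion of the TODO: log, then assert in debug builds.
DLOG_IF(WARNING, audio_source_provider_.get())
    << "OnAudioTrackReady: an audio source provider is already live.";
DCHECK(!audio_source_provider_.get());
```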
| 215 int SpeechRecognitionDispatcher::GetOrCreateIDForHandle( | 272 int SpeechRecognitionDispatcher::GetOrCreateIDForHandle( | 
| 216 const WebSpeechRecognitionHandle& handle) { | 273 const WebSpeechRecognitionHandle& handle) { | 
| 217 // Search first for an existing mapping. | 274 // Search first for an existing mapping. | 
| 218 for (HandleMap::iterator iter = handle_map_.begin(); | 275 for (HandleMap::iterator iter = handle_map_.begin(); | 
| 219 iter != handle_map_.end(); | 276 iter != handle_map_.end(); | 
| 220 ++iter) { | 277 ++iter) { | 
| 221 if (iter->second.equals(handle)) | 278 if (iter->second.equals(handle)) | 
| 222 return iter->first; | 279 return iter->first; | 
| 223 } | 280 } | 
| (...skipping 16 matching lines...) | |
| 240 } | 297 } | 
| 241 | 298 | 
| 242 const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID( | 299 const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID( | 
| 243 int request_id) { | 300 int request_id) { | 
| 244 HandleMap::iterator iter = handle_map_.find(request_id); | 301 HandleMap::iterator iter = handle_map_.find(request_id); | 
| 245 DCHECK(iter != handle_map_.end()); | 302 DCHECK(iter != handle_map_.end()); | 
| 246 return iter->second; | 303 return iter->second; | 
| 247 } | 304 } | 
| 248 | 305 | 
| 249 } // namespace content | 306 } // namespace content | 