Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2012 Google Inc. All rights reserved. | 2 * Copyright (C) 2012 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * * Redistributions of source code must retain the above copyright | 7 * * Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * * Redistributions in binary form must reproduce the above copyright | 9 * * Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 22 matching lines...) | |
| 33 #include "core/page/Page.h" | 33 #include "core/page/Page.h" |
| 34 #include "modules/mediastream/MediaStreamTrack.h" | 34 #include "modules/mediastream/MediaStreamTrack.h" |
| 35 #include "modules/speech/SpeechRecognitionController.h" | 35 #include "modules/speech/SpeechRecognitionController.h" |
| 36 #include "modules/speech/SpeechRecognitionError.h" | 36 #include "modules/speech/SpeechRecognitionError.h" |
| 37 #include "modules/speech/SpeechRecognitionEvent.h" | 37 #include "modules/speech/SpeechRecognitionEvent.h" |
| 38 | 38 |
| 39 namespace blink { | 39 namespace blink { |
| 40 | 40 |
| 41 SpeechRecognition* SpeechRecognition::create(ExecutionContext* context) | 41 SpeechRecognition* SpeechRecognition::create(ExecutionContext* context) |
| 42 { | 42 { |
| 43 SpeechRecognition* speechRecognition = new SpeechRecognition(context); | 43 ASSERT(context && context->isDocument()); |
| | 44 Document* document = toDocument(context); |
| | 45 ASSERT(document); |
| | 46 SpeechRecognition* speechRecognition = new SpeechRecognition(document->page(), context); |
> **haraken** 2015/02/27 11:29:53: I think executionContext must not be null but document…
>
> **sof** 2015/02/27 11:33:42: Yes; the supplement's from() would return a nullptr…
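The thread above concerns the null-safety of the controller reference: the constructor now obtains it via the page supplement, and that lookup (or the page-scoped context itself) may no longer be available later, which is why the NEW side trades `ASSERT(m_controller)` for early returns and adds a `contextDestroyed()` override that clears the pointer. A minimal standalone sketch of that pattern, using hypothetical `Page`/`Controller`/`Recognition` types rather than the actual Blink classes:

```cpp
// Standalone sketch, not Blink code: hypothetical types stand in for
// Page, SpeechRecognitionController and SpeechRecognition.
#include <iostream>

struct Controller {
    void start() { std::cout << "recognition started\n"; }
};

struct Page {
    Controller controller;
    // Stand-in for the supplement lookup; yields null when there is no page.
    static Controller* from(Page* page) { return page ? &page->controller : nullptr; }
};

class Recognition {
public:
    explicit Recognition(Page* page) : m_controller(Page::from(page)) {}

    void start() {
        if (!m_controller) // guard instead of ASSERT: the lookup may have failed
            return;
        m_controller->start();
    }

    // Mirrors the contextDestroyed() override in the patch: drop the pointer
    // so later calls fall through the guard above.
    void contextDestroyed() { m_controller = nullptr; }

private:
    Controller* m_controller;
};

int main() {
    Page page;
    Recognition attached(&page);
    attached.start();           // prints "recognition started"

    Recognition detached(nullptr);
    detached.start();           // no-op: controller lookup returned null

    attached.contextDestroyed();
    attached.start();           // no-op once the context is gone
    return 0;
}
```

Guarding rather than asserting pairs with the `contextDestroyed()` override later in the patch, which resets `m_controller` to null once the page-level context is torn down.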
| 44 speechRecognition->suspendIfNeeded(); | 47 speechRecognition->suspendIfNeeded(); |
| 45 return speechRecognition; | 48 return speechRecognition; |
| 46 } | 49 } |
| 47 | 50 |
| 48 void SpeechRecognition::start(ExceptionState& exceptionState) | 51 void SpeechRecognition::start(ExceptionState& exceptionState) |
| 49 { | 52 { |
| 50 ASSERT(m_controller); | 53 if (!m_controller) |
| | 54 return; |
| | 55 |
| 51 if (m_started) { | 56 if (m_started) { |
| 52 exceptionState.throwDOMException(InvalidStateError, "recognition has already started."); | 57 exceptionState.throwDOMException(InvalidStateError, "recognition has already started."); |
| 53 return; | 58 return; |
| 54 } | 59 } |
| 55 | 60 |
| 56 m_finalResults.clear(); | 61 m_finalResults.clear(); |
| 57 m_controller->start(this, m_grammars, m_lang, m_continuous, m_interimResults, m_maxAlternatives, m_audioTrack); | 62 m_controller->start(this, m_grammars, m_lang, m_continuous, m_interimResults, m_maxAlternatives, m_audioTrack); |
| 58 m_started = true; | 63 m_started = true; |
| 59 } | 64 } |
| 60 | 65 |
| 61 void SpeechRecognition::stopFunction() | 66 void SpeechRecognition::stopFunction() |
| 62 { | 67 { |
| 63 ASSERT(m_controller); | 68 if (!m_controller) |
| | 69 return; |
| | 70 |
| 64 if (m_started && !m_stopping) { | 71 if (m_started && !m_stopping) { |
| 65 m_stopping = true; | 72 m_stopping = true; |
| 66 m_controller->stop(this); | 73 m_controller->stop(this); |
| 67 } | 74 } |
| 68 } | 75 } |
| 69 | 76 |
| 70 void SpeechRecognition::abort() | 77 void SpeechRecognition::abort() |
| 71 { | 78 { |
| 72 ASSERT(m_controller); | 79 if (!m_controller) |
| | 80 return; |
| | 81 |
| 73 if (m_started && !m_stopping) { | 82 if (m_started && !m_stopping) { |
| 74 m_stopping = true; | 83 m_stopping = true; |
| 75 m_controller->abort(this); | 84 m_controller->abort(this); |
| 76 } | 85 } |
| 77 } | 86 } |
| 78 | 87 |
| 79 void SpeechRecognition::didStartAudio() | 88 void SpeechRecognition::didStartAudio() |
| 80 { | 89 { |
| 81 dispatchEvent(Event::create(EventTypeNames::audiostart)); | 90 dispatchEvent(Event::create(EventTypeNames::audiostart)); |
| 82 } | 91 } |
| (...skipping 76 matching lines...) | |
| 159 m_stoppedByActiveDOMObject = true; | 168 m_stoppedByActiveDOMObject = true; |
| 160 if (hasPendingActivity()) | 169 if (hasPendingActivity()) |
| 161 abort(); | 170 abort(); |
| 162 } | 171 } |
| 163 | 172 |
| 164 bool SpeechRecognition::hasPendingActivity() const | 173 bool SpeechRecognition::hasPendingActivity() const |
| 165 { | 174 { |
| 166 return m_started; | 175 return m_started; |
| 167 } | 176 } |
| 168 | 177 |
| 169 SpeechRecognition::SpeechRecognition(ExecutionContext* context) | 178 SpeechRecognition::SpeechRecognition(Page* page, ExecutionContext* context) |
| 170 : ActiveDOMObject(context) | 179 : PageLifecycleObserver(page) |
| | 180 , ActiveDOMObject(context) |
| 171 , m_grammars(SpeechGrammarList::create()) // FIXME: The spec is not clear on the default value for the grammars attribute. | 181 , m_grammars(SpeechGrammarList::create()) // FIXME: The spec is not clear on the default value for the grammars attribute. |
| 172 , m_audioTrack(nullptr) | 182 , m_audioTrack(nullptr) |
| 173 , m_continuous(false) | 183 , m_continuous(false) |
| 174 , m_interimResults(false) | 184 , m_interimResults(false) |
| 175 , m_maxAlternatives(1) | 185 , m_maxAlternatives(1) |
| 176 , m_controller(nullptr) | 186 , m_controller(SpeechRecognitionController::from(page)) |
| 177 , m_stoppedByActiveDOMObject(false) | 187 , m_stoppedByActiveDOMObject(false) |
| 178 , m_started(false) | 188 , m_started(false) |
| 179 , m_stopping(false) | 189 , m_stopping(false) |
| 180 { | 190 { |
| 181 Document* document = toDocument(executionContext()); | |
| 182 | |
| 183 Page* page = document->page(); | |
| 184 ASSERT(page); | |
| 185 | |
| 186 m_controller = SpeechRecognitionController::from(page); | |
| 187 ASSERT(m_controller); | |
| 188 | |
| 189 // FIXME: Need to hook up with Page to get notified when the visibility changes. | 191 // FIXME: Need to hook up with Page to get notified when the visibility changes. |
| 190 } | 192 } |
| 191 | 193 |
| 192 SpeechRecognition::~SpeechRecognition() | 194 SpeechRecognition::~SpeechRecognition() |
| 193 { | 195 { |
| 194 } | 196 } |
| 195 | 197 |
| | 198 void SpeechRecognition::contextDestroyed() |
| | 199 { |
| | 200 m_controller = nullptr; |
| | 201 PageLifecycleObserver::contextDestroyed(); |
| | 202 } |
| | 203 |
| 196 DEFINE_TRACE(SpeechRecognition) | 204 DEFINE_TRACE(SpeechRecognition) |
| 197 { | 205 { |
| 198 visitor->trace(m_grammars); | 206 visitor->trace(m_grammars); |
| 199 visitor->trace(m_audioTrack); | 207 visitor->trace(m_audioTrack); |
| 200 #if ENABLE(OILPAN) | 208 #if ENABLE(OILPAN) |
| 201 visitor->trace(m_controller); | 209 visitor->trace(m_controller); |
| 202 #endif | 210 #endif |
| 203 visitor->trace(m_finalResults); | 211 visitor->trace(m_finalResults); |
| 204 RefCountedGarbageCollectedEventTargetWithInlineData<SpeechRecognition>::trace(visitor); | 212 RefCountedGarbageCollectedEventTargetWithInlineData<SpeechRecognition>::trace(visitor); |
| | 213 PageLifecycleObserver::trace(visitor); |
| 205 ActiveDOMObject::trace(visitor); | 214 ActiveDOMObject::trace(visitor); |
| 206 } | 215 } |
| 207 | 216 |
| 208 } // namespace blink | 217 } // namespace blink |